# -*- coding: utf-8 -*-
"""
SQL 数据导入并根据已有 table-level refers_to_edges 建立行级数据关系（ArangoDB）
依赖：
  pip install fastapi uvicorn aiohttp python-arango pydantic
启动示例：
  uvicorn this_module:app --reload --host 0.0.0.0 --port 8000
接口：
  POST /import-sql-data
  body: { "file_url": "...", "file_name": "xxx.sql" }
返回 JSON 包含导入结果统计
"""
import asyncio
import hashlib
import json
import os
import re
import uuid
import shutil
import zipfile
import aiohttp
import datetime
import logging
from typing import Dict, List, Optional, Any

from fastapi import FastAPI, HTTPException, Body
from pydantic import BaseModel

from arango import ArangoClient
from arango.database import StandardDatabase

# --- settings fallback (replace with your app.config.settings) ---
try:
    from app.config import settings
except Exception:
    # Minimal stand-in used when app.config is unavailable (e.g. running
    # this module standalone for local testing).
    class _S:
        temp_dir_root = "/tmp/er_import"       # scratch dir for downloads/extraction
        arango_host = "http://127.0.0.1:8529"  # ArangoDB HTTP endpoint
        arango_user = "root"
        arango_password = ""
        dify_prefix = ""                       # optional base URL for relative file URLs
    settings = _S()

# --- logging ---
logging.basicConfig(level=logging.INFO, format="%(asctime)s - %(levelname)s - %(message)s")
logger = logging.getLogger("sql_data_importer")

app = FastAPI(title="SQL Data Importer (with data-level relations)")

# ---------- request/response models ----------
class ImportRequest(BaseModel):
    """Request body for POST /import-sql-data."""
    file_url: str   # URL of the .sql/.zip file (may be relative to settings.dify_prefix)
    file_name: str  # original file name; its extension decides sql-vs-zip handling
    # optional: specific arango db hint (if you want to override)
    arango_db_hint: Optional[str] = None


class ImportResponse(BaseModel):
    """Response envelope for the import endpoint."""
    status: str = "success"                 # "success" or "error"
    message: Optional[str] = None           # human-readable error message, if any
    data: Optional[Dict[str, Any]] = None   # import statistics on success


# ---------- helper utilities ----------
def sanitize_collection_name(table_name: str) -> str:
    """Normalize a SQL table name into a safe ArangoDB collection name.

    Runs of non-word characters collapse to single underscores, leading and
    trailing underscores are trimmed, and the result is lowercased.  When
    nothing usable remains, a random fallback name is generated.
    """
    cleaned = re.sub(r'\W+', '_', table_name.strip()).strip('_').lower()
    if cleaned:
        return cleaned
    return f"table_{uuid.uuid4().hex[:6]}"


def sql_unquote(value: Optional[str]) -> Optional[str]:
    """Strip one layer of surrounding quotes from a SQL string literal.

    Handles both single- and double-quoted literals, undoing SQL's
    doubled-single-quote escape ('' -> ') and backslash-escaped double
    quotes (\\" -> ").  Unquoted input is returned stripped of surrounding
    whitespace; None passes through unchanged.

    Fix: the original signature claimed ``(value: str) -> str`` although it
    explicitly accepts and returns None — annotation corrected to Optional.
    """
    if value is None:
        return None
    v = value.strip()
    if len(v) >= 2 and ((v[0] == "'" and v[-1] == "'") or (v[0] == '"' and v[-1] == '"')):
        inner = v[1:-1]
        # SQL escapes a single quote inside '...' by doubling it
        inner = inner.replace("''", "'")
        # some dumps also backslash-escape double quotes
        inner = inner.replace('\\"', '"')
        return inner
    return v


def cast_sql_value(raw: str):
    """Best-effort conversion of a SQL literal into a native Python value.

    Recognizes the NULL/TRUE/FALSE keywords (case-insensitive), quoted
    strings, integers, and floats (including exponent notation).  Anything
    unrecognized is returned as the whitespace-stripped string.
    """
    if raw is None:
        return None
    text = raw.strip()
    if not text:
        return ""
    keyword = text.upper()
    if keyword == "NULL":
        return None
    if keyword == "TRUE":
        return True
    if keyword == "FALSE":
        return False
    # quoted string literal -> unquote it
    first, last = text[0], text[-1]
    if (first == "'" and last == "'") or (first == '"' and last == '"'):
        return sql_unquote(text)
    # integer literal
    if re.fullmatch(r"-?\d+", text):
        try:
            return int(text)
        except Exception:
            pass
    # float literal, with optional exponent
    if re.fullmatch(r"-?\d+\.\d+([eE][+-]?\d+)?", text) or re.fullmatch(r"-?\d+[eE][+-]?\d+", text):
        try:
            return float(text)
        except Exception:
            pass
    # fallback: return the raw (stripped) text
    return text


# ---------- SQL parsing (CREATE TABLE + INSERT statements) ----------
class SQLParser:
    """Lightweight regex-based parser for MySQL-style dump files.

    Extracts CREATE TABLE schemas and INSERT rows.  It is not a full SQL
    parser: statements are located with regular expressions, so unusual
    formatting can confuse it (see NOTE in normalize_sql_text).
    """

    @staticmethod
    def normalize_sql_text(sql: str) -> str:
        """Strip comments: MySQL /*!...*/ version hints and -- / # line comments.

        NOTE(review): the line-comment regexes are not quote-aware, so a
        literal '--' or '#' inside a string value would also be stripped —
        confirm this is acceptable for the dumps being processed.
        """
        # remove /*! ... */ MySQL version comments, and line comments -- / #
        sql = re.sub(r"/\*![\s\S]*?\*/", "", sql)
        sql = re.sub(r"--.*?$", "", sql, flags=re.MULTILINE)
        sql = re.sub(r"#.*?$", "", sql, flags=re.MULTILINE)
        return sql

    @staticmethod
    def parse_create_tables(sql_text: str, db_hint: str = "default") -> Dict[str, Dict]:
        """
        Returns dict: table_name -> {
            "database": dbname,
            "columns": [col1, col2, ...],
            "col_types": {col: type_str},
            "primary_keys": [pkcols]
        }
        """
        sql = SQLParser.normalize_sql_text(sql_text)

        # 1. Detect the database name from the file content itself.
        db_in_file = None
        # Match USE `dbname` or USE dbname
        use_db_match = re.search(r"\bUSE\s+`?([A-Za-z0-9_]+)`?", sql, re.I)
        if use_db_match:
            db_in_file = use_db_match.group(1)

        # If USE did not match, fall back to CREATE DATABASE.
        if not db_in_file:
            create_db_match = re.search(
                r"\bCREATE\s+DATABASE(?:\s+IF\s+NOT\s+EXISTS)?\s+`?([A-Za-z0-9_]+)`?",
                sql, re.I
            )
            if create_db_match:
                db_in_file = create_db_match.group(1)

        # pattern to capture CREATE TABLE up to the next ')' followed by ENGINE/DEFAULT/; (approx)
        create_table_re = re.compile(
            r"CREATE\s+TABLE\s+(?:IF\s+NOT\s+EXISTS\s+)?(?P<fullname>(?:`[^`]+`|\w+)(?:\s*\.\s*(?:`[^`]+`|\w+))?)\s*\((?P<cols>.*?)\)\s*(?:ENGINE|DEFAULT|COMMENT|;)",
            re.S | re.I
        )

        tables = {}
        for m in create_table_re.finditer(sql):
            fullname = m.group("fullname").strip()
            cols_block = m.group("cols")
            # fullname may be `db`.`table` or just `table`
            parts = [p.strip(" `") for p in re.split(r'\.', fullname)]
            table_name = parts[-1]

            # Database-name precedence: explicit in CREATE TABLE > parsed from file > db_hint
            if len(parts) > 1:
                database_name = parts[-2]
            elif db_in_file:
                database_name = db_in_file
            else:
                database_name = db_hint

            # collect primary keys defined at table-level
            primary_keys = []
            pk_inline = re.findall(r"PRIMARY\s+KEY\s*\(([^)]+)\)", cols_block, re.I)
            for pk_def in pk_inline:
                pk_cols = [c.strip(" `") for c in pk_def.split(",")]
                primary_keys.extend(pk_cols)

            # split top-level commas into lines (quote- and paren-aware scan)
            col_lines = []
            buf = ""
            level = 0        # parenthesis nesting depth
            in_quote = None  # current quote char, or None when outside a literal
            for ch in cols_block:
                if in_quote:
                    if ch == in_quote:
                        in_quote = None
                    buf += ch
                else:
                    if ch in ("'", '"'):
                        in_quote = ch
                        buf += ch
                    elif ch == '(':
                        level += 1
                        buf += ch
                    elif ch == ')':
                        if level > 0:
                            level -= 1
                        buf += ch
                    elif ch == ',' and level == 0:
                        col_lines.append(buf.strip())
                        buf = ""
                    else:
                        buf += ch
            if buf.strip():
                col_lines.append(buf.strip())

            cols = []
            col_types = {}
            for line in col_lines:
                # skip constraints
                if re.match(r"^(PRIMARY|UNIQUE|KEY|CONSTRAINT|FOREIGN)\b", line, re.I):
                    continue
                cm = re.match(r"^`?([A-Za-z0-9_]+)`?\s+(.+)$", line.strip())
                if not cm:
                    continue
                colname = cm.group(1)
                rest = cm.group(2).strip()
                # extract type token (e.g. "varchar(255)", "int")
                tm = re.match(r"^([A-Za-z0-9_]+(\s*\([^)]*\))?)", rest)
                coltype = tm.group(1) if tm else rest.split()[0]
                cols.append(colname)
                col_types[colname] = coltype
                # detect inline PK (e.g. "id INT PRIMARY KEY")
                if re.search(r"\bPRIMARY\s+KEY\b", rest, re.I) and colname not in primary_keys:
                    primary_keys.append(colname)

            tables[table_name] = {
                "database": database_name,
                "columns": cols,
                "col_types": col_types,
                "primary_keys": primary_keys
            }

        return tables

    @staticmethod
    def _split_top_level_commas(s: str) -> List[str]:
        """Split *s* on commas that are outside parentheses and string quotes."""
        parts = []
        buf = ""
        level = 0
        in_quote = None
        i = 0
        L = len(s)
        while i < L:
            ch = s[i]
            if in_quote:
                # SQL uses '' for a literal ', so handle that
                if ch == in_quote:
                    # check escaped quote (two single quotes)
                    if i + 1 < L and s[i + 1] == in_quote:
                        buf += in_quote
                        i += 1  # skip extra
                    else:
                        in_quote = None
                        buf += ch
                else:
                    buf += ch
            else:
                if ch in ("'", '"'):
                    in_quote = ch
                    buf += ch
                elif ch == '(':
                    level += 1
                    buf += ch
                elif ch == ')':
                    if level > 0:
                        level -= 1
                    buf += ch
                elif ch == ',' and level == 0:
                    parts.append(buf.strip())
                    buf = ""
                else:
                    buf += ch
            i += 1
        if buf.strip():
            parts.append(buf.strip())
        return parts

    @staticmethod
    def parse_insert_statements(sql_text: str, schemas: Dict[str, Dict]) -> Dict[str, List[Dict]]:
        """
        Returns dict: table_name -> [ {col:val, ...}, ... ]

        When an INSERT carries no explicit column list, column names are
        taken from *schemas* in declaration order; if the table is unknown
        the statement is skipped with a warning.
        """
        sql = SQLParser.normalize_sql_text(sql_text)
        # regex for INSERT ... VALUES ...; (greedy until semicolon)
        insert_re = re.compile(
            r"INSERT\s+INTO\s+(?P<fullname>(?:`[^`]+`|\w+)(?:\s*\.\s*(?:`[^`]+`|\w+))?)\s*(?:\((?P<cols>[^)]+)\))?\s*VALUES\s*(?P<vals>.*?);",
            re.S | re.I
        )

        inserts_by_table: Dict[str, List[Dict]] = {}

        for m in insert_re.finditer(sql):
            fullname = m.group("fullname").strip()
            parts = [p.strip(" `") for p in re.split(r'\.', fullname)]
            table_name = parts[-1]
            cols_part = m.group("cols")
            vals_part = m.group("vals").strip()

            if cols_part:
                # split on commas that are not inside parentheses
                cols = [c.strip(" `") for c in re.split(r',\s*(?![^()]*\))', cols_part)]
            else:
                # use schema column order if available
                cols = schemas.get(table_name, {}).get("columns", [])
                if not cols:
                    # can't map values to columns
                    logger.warning(f"No column list and no schema for table {table_name}; skipping this INSERT.")
                    continue

            # parse values part to extract tuples
            tuples = []
            i = 0
            L = len(vals_part)
            while i < L:
                # find next '(' at top-level
                while i < L and vals_part[i] != '(':
                    i += 1
                if i >= L:
                    break
                start = i
                level = 0
                in_quote = None
                i2 = i
                # scan forward to the matching ')' of this tuple,
                # tracking quotes so parens inside strings are ignored
                while i2 < L:
                    ch = vals_part[i2]
                    if in_quote:
                        if ch == in_quote:
                            # handle escaped quotes ''
                            if i2 + 1 < L and vals_part[i2 + 1] == in_quote:
                                i2 += 1  # skip escaped quote
                            else:
                                in_quote = None
                        # continue
                    else:
                        if ch in ("'", '"'):
                            in_quote = ch
                        elif ch == '(':
                            level += 1
                        elif ch == ')':
                            if level > 0:
                                level -= 1
                                if level == 0:
                                    # found complete tuple
                                    break
                    i2 += 1
                if i2 >= L:
                    break
                tuple_str = vals_part[start + 1:i2].strip()  # strip parentheses
                # split top-level commas
                values = SQLParser._split_top_level_commas(tuple_str)
                # cast values
                parsed_values = [cast_sql_value(v) for v in values]
                tuples.append(parsed_values)
                i = i2 + 1

            # now map tuples to columns
            for tup in tuples:
                if len(tup) != len(cols):
                    # mismatch: skip this row but log
                    logger.warning(f"Values count {len(tup)} != columns count {len(cols)} for table {table_name}; skipping row")
                    continue
                row = {cols[idx]: tup[idx] for idx in range(len(cols))}
                inserts_by_table.setdefault(table_name, []).append(row)

        return inserts_by_table


# ---------- Main importer ----------
class SQLDataImporter:
    """Downloads a SQL dump, parses it, and loads rows into ArangoDB.

    Row documents are written into a database named ``db_data_{hint}``;
    table-level foreign-key metadata is read from ``db_structure_{hint}``
    and used to create row-level ``refers_to_data_edges`` between documents
    whose related column values match.
    """

    def __init__(self):
        # Ensure the scratch directory for downloads/extraction exists.
        os.makedirs(settings.temp_dir_root, exist_ok=True)
        self.client: Optional[ArangoClient] = None
        self.db: Optional[StandardDatabase] = None

    def _init_arango(self):
        # Lazily create the Arango client on first use.
        if not self.client:
            self.client = ArangoClient(hosts=settings.arango_host)

    def _get_db(self, db_name: str) -> StandardDatabase:
        """Return a handle to *db_name*, creating the database if missing."""
        self._init_arango()
        sys_db = self.client.db('_system', username=settings.arango_user, password=settings.arango_password)
        if not sys_db.has_database(db_name):
            # do not create the db_structure db automatically if you don't want; create
            logger.info(f"Database {db_name} not found, will create.")
            sys_db.create_database(db_name)
        self.db = self.client.db(db_name, username=settings.arango_user, password=settings.arango_password)
        return self.db

    async def download_file(self, url: str, file_name: str) -> str:
        """Download *url* into a fresh temp directory and return the local path.

        Relative URLs are prefixed with settings.dify_prefix when configured.
        Raises HTTPException on download failure.
        """
        # NOTE(review): `url.startswith(settings.dify_prefix or "")` is always
        # True for an empty prefix, so the prefix branch only fires for a
        # non-empty dify_prefix with a non-http relative URL — confirm intent.
        if not url.startswith(settings.dify_prefix or "") and settings.dify_prefix and not url.startswith("http"):
            full_url = settings.dify_prefix.rstrip("/") + "/" + url.lstrip("/")
        else:
            full_url = url

        # Unique temp dir per download so parallel requests never collide.
        timestamp = datetime.datetime.now().strftime("%Y%m%d_%H%M%S")
        unique = uuid.uuid4().hex[:8]
        tempdir = os.path.join(settings.temp_dir_root, f"tmp_{timestamp}_{unique}")
        os.makedirs(tempdir, exist_ok=True)
        filename = file_name or full_url.split("/")[-1]
        path = os.path.join(tempdir, filename)
        try:
            async with aiohttp.ClientSession() as session:
                async with session.get(full_url) as resp:
                    # NOTE(review): this HTTPException(400) is caught by the
                    # broad `except Exception` below and re-raised as a 500.
                    if resp.status != 200:
                        raise HTTPException(status_code=400, detail=f"下载失败，HTTP {resp.status}")
                    # Stream to disk in 1 MiB chunks to bound memory use.
                    with open(path, "wb") as f:
                        while True:
                            chunk = await resp.content.read(1024 * 1024)
                            if not chunk:
                                break
                            f.write(chunk)
            logger.info(f"Downloaded file to {path}")
            return path
        except Exception as e:
            logger.error(f"download error: {e}")
            raise HTTPException(status_code=500, detail=f"下载失败: {e}")

    def extract_zip(self, zip_path: str) -> List[str]:
        """Extract *zip_path* (must contain only .sql files) next to it and
        return the paths of the extracted files.

        Raises HTTPException for an empty, invalid, or non-SQL-only archive.
        """
        extract_dir = os.path.join(os.path.dirname(zip_path), "extracted")
        os.makedirs(extract_dir, exist_ok=True)
        sql_files = []
        try:
            with zipfile.ZipFile(zip_path, 'r') as zf:
                # Directories (names ending in '/') are skipped.
                entries = [e for e in zf.namelist() if not e.endswith('/')]
                if not entries:
                    raise HTTPException(status_code=400, detail="ZIP 文件为空")
                for e in entries:
                    if not e.lower().endswith('.sql'):
                        raise HTTPException(status_code=400, detail="ZIP 内必须只包含 SQL 文件 (.sql)")
                    # NOTE(review): zf.extract sanitizes hostile member names,
                    # so the os.path.join below may not match the actual
                    # extracted location for such names — confirm inputs are trusted.
                    zf.extract(e, extract_dir)
                    sql_files.append(os.path.join(extract_dir, e))
            return sql_files
        except zipfile.BadZipFile as e:
            raise HTTPException(status_code=400, detail=f"ZIP 文件无效: {e}")

    def fetch_table_level_relations(self, arango_db_name: str, parsed_table_names: List[str]) -> List[Dict]:
        """
        Read the existing column-level foreign_key_edges from the
        db_structure_{arango_db_name} ArangoDB database and keep only the
        relations whose tables both appear in *parsed_table_names*.
        Returned format:
            [
              {
                "source_table": "orders",
                "source_column": "product_id",
                "target_table": "products",
                "target_column": "product_id"
              }, ...
            ]
        """
        arango_db_name = f"db_structure_{arango_db_name}"
        db = self._get_db(arango_db_name)
        relations = []
        if not db.has_collection("foreign_key_edges"):
            logger.info("No foreign_key_edges collection found in arango DB.")
            return relations

        try:
            for edge in db.collection("foreign_key_edges").all():
                _from = edge.get("_from", "")
                _to = edge.get("_to", "")
                if not _from or not _to:
                    continue

                def parse_column_key(key: str):
                    """
                    Parse (table, column) out of "columns/{db}:{table}:{column}".
                    Returns (None, None) when the key does not match.
                    """
                    try:
                        parts = key.split("/", 1)[-1].split(":")
                        if len(parts) >= 3:
                            table = parts[-2]
                            column = parts[-1]
                            return table, column
                    except Exception:
                        pass
                    return None, None

                from_table, from_column = parse_column_key(_from)
                to_table, to_column = parse_column_key(_to)

                # keep only relations whose tables we actually parsed
                if from_table in parsed_table_names and to_table in parsed_table_names:
                    relations.append({
                        "source_table": from_table,
                        "source_column": from_column,
                        "target_table": to_table,
                        "target_column": to_column,
                    })

        except Exception as e:
            logger.error(f"Failed reading foreign_key_edges: {e}")

        logger.info(f"Fetched {len(relations)} column-level relations")
        return relations

    def ensure_collection(self, db: StandardDatabase, name: str, edge: bool = False):
        """Create collection *name* (vertex or edge) if it does not exist yet."""
        if not db.has_collection(name):
            db.create_collection(name, edge=edge)
            logger.info(f"Created collection: {name} (edge={edge})")

    def import_data_and_link(self, arango_db_name: str, schemas: Dict[str, Dict], inserts: Dict[str, List[Dict]]):
        """Insert all parsed rows into db_data_{arango_db_name}, then create
        row-level REFERS_TO_ROW edges following the table-level relations.

        Returns stats: inserted_counts / created_row_edges / relations_used.
        """
        dataDbName = f"db_data_{arango_db_name}"
        db = self._get_db(dataDbName)

        # ---------- Step 1: gather relations and plan value indexes ----------
        parsed_table_names = list({*schemas.keys(), *inserts.keys()})
        table_relations = self.fetch_table_level_relations(arango_db_name, parsed_table_names)

        # Columns per table that take part in a relation (need value lookup)
        index_columns_by_table: Dict[str, set] = {}
        for r in table_relations:
            if r["source_column"]:
                index_columns_by_table.setdefault(r["source_table"], set()).add(r["source_column"])
            if r["target_column"]:
                index_columns_by_table.setdefault(r["target_table"], set()).add(r["target_column"])

        # Create one document collection per table plus the edge collection
        table_to_collection: Dict[str, str] = {}
        for tbl in parsed_table_names:
            coll_name = sanitize_collection_name(tbl)
            table_to_collection[tbl] = coll_name
            self.ensure_collection(db, coll_name, edge=False)
        data_edge_coll = "refers_to_data_edges"
        self.ensure_collection(db, data_edge_coll, edge=True)

        # In-memory value index: index_map[table][col][val] = list(_key)
        index_map: Dict[str, Dict[str, Dict[Any, List[str]]]] = {}
        inserted_counts: Dict[str, int] = {}

        # ---------- Step 2: insert rows and populate the value index ----------
        for table, rows in inserts.items():
            coll_name = table_to_collection.get(table)
            if not coll_name:
                logger.warning(f"No collection mapping for table {table}, skipping rows.")
                continue
            coll = db.collection(coll_name)
            index_map.setdefault(table, {})
            inserted_counts[table] = 0

            schema_cols = schemas.get(table, {}).get("columns", [])
            pk_cols = schemas.get(table, {}).get("primary_keys", [])

            for row in rows:
                # Fill columns missing from this INSERT with None
                for c in schema_cols:
                    if c not in row:
                        row[c] = None

                # Build a deterministic _key: primary-key values when complete,
                # otherwise an md5 hash of the whole row (stable across re-imports).
                doc_key = None
                if pk_cols:
                    pk_vals = [str(row.get(pk) if row.get(pk) is not None else "") for pk in pk_cols]
                    if all(v != "" for v in pk_vals):
                        doc_key = f"{table}:{':'.join(pk_vals)}"
                if not doc_key:
                    # doc_key = f"{table}:{uuid.uuid4().hex[:12]}"
                    row_str = json.dumps(row, sort_keys=True, default=str)
                    row_hash = hashlib.md5(row_str.encode()).hexdigest()[:12]
                    doc_key = f"{table}:{row_hash}"

                doc = dict(row, _key=doc_key)

                try:
                    # overwrite=True makes re-imports idempotent for the same _key
                    coll.insert(doc, overwrite=True)
                    inserted_counts[table] += 1
                except Exception as e:
                    logger.warning(f"Insert row into {coll_name} failed: {e}. Retrying without _key.")
                    try:
                        del doc["_key"]
                        res = coll.insert(doc)
                        # NOTE(review): this fallback path does not increment
                        # inserted_counts, so the stats undercount retried rows.
                        doc_key = res.get("_key") or res.get("key") or f"{table}:{uuid.uuid4().hex[:12]}"
                    except Exception as e2:
                        logger.error(f"Failed to insert doc into {coll_name}: {e2}")
                        continue

                # Record value -> doc keys for every relation column of this table
                for col in index_columns_by_table.get(table, set()):
                    index_map[table].setdefault(col, {})
                    val = row.get(col)
                    index_map[table][col].setdefault(val, []).append(doc_key)

        # ---------- Step 3: create row-level relation edges ----------
        created_edges = 0
        edge_coll = db.collection(data_edge_coll)

        for rel in table_relations:
            src_tbl, tgt_tbl = rel["source_table"], rel["target_table"]
            src_col, tgt_col = rel.get("source_column"), rel.get("target_column")
            if not src_col or not tgt_col:
                logger.info(f"Relation {src_tbl} -> {tgt_tbl} missing via_columns, skipped.")
                continue
            if src_col not in index_map.get(src_tbl, {}):
                logger.info(f"No indexed values for {src_tbl}.{src_col}, skipping.")
                continue
            if tgt_col not in index_map.get(tgt_tbl, {}):
                logger.info(f"No indexed values for {tgt_tbl}.{tgt_col}, skipping.")
                continue

            src_coll = table_to_collection[src_tbl]
            tgt_coll = table_to_collection[tgt_tbl]

            # Join source and target rows on equal column values
            for val, src_keys in index_map[src_tbl][src_col].items():
                tgt_keys = index_map[tgt_tbl][tgt_col].get(val)
                if not tgt_keys:
                    continue
                for sk in src_keys:
                    for tk in tgt_keys:
                        from_ref = f"{src_coll}/{sk}"
                        to_ref = f"{tgt_coll}/{tk}"
                        try:
                            # deterministic edge key so re-imports overwrite, not duplicate
                            edge_key = f"ref_to_row:{from_ref}--{to_ref}".replace("/", "_")
                            edge_coll.insert({
                                "_from": from_ref,
                                "_to": to_ref,
                                "relation": "REFERS_TO_ROW",
                                "source_table": src_tbl,
                                "target_table": tgt_tbl,
                                "source_column": src_col,
                                "target_column": tgt_col,
                                "value": val,
                                "_key": edge_key,
                            },overwrite=True)
                            created_edges += 1
                        except Exception as e:
                            logger.debug(f"Edge insert failed: {e}")
        # ---------- Step 4: create or refresh the named graph ----------
        graph_name = "data_graph"
        edge_definitions = [{
            "edge_collection": data_edge_coll,
            "from_vertex_collections": list(table_to_collection.values()),
            "to_vertex_collections": list(table_to_collection.values())
        }]

        if db.has_graph(graph_name):
            # Update the existing graph
            graph = db.graph(graph_name)
            # Drop all current edge definitions first
            for ed in graph.edge_definitions():
                graph.delete_edge_definition(ed["edge_collection"])
            # Re-add the fresh edge definitions
            for ed in edge_definitions:
                graph.create_edge_definition(**ed)
            graph_update_status = "updated"
        else:
            # Create a new graph
            db.create_graph(
                name=graph_name,
                edge_definitions=edge_definitions
            )
            graph_update_status = "created"

        logger.info(f"Graph '{graph_name}' {graph_update_status} with {len(edge_definitions)} edge definitions")

        return {
            "inserted_counts": inserted_counts,
            "created_row_edges": created_edges,
            "relations_used": len(table_relations)
        }

    async def process(self, file_url: str, file_name: str, arango_db_hint: Optional[str] = None) -> Dict[str, Any]:
        """Full pipeline for a remote file: download, parse, import, cleanup.

        Raises HTTPException for unsupported file types or when no INSERT
        statements could be parsed.
        """
        # 1. download
        file_path = await self.download_file(file_url, file_name)

        # 2. extract or collect sql files
        try:
            if file_path.lower().endswith(".zip"):
                sql_files = self.extract_zip(file_path)
            elif file_path.lower().endswith(".sql"):
                sql_files = [file_path]
            else:
                raise HTTPException(status_code=400, detail="仅支持 .sql 或 .zip(仅包含 .sql) 文件")
        except Exception:
            # Remove the temp dir before propagating the error.
            shutil.rmtree(os.path.dirname(file_path), ignore_errors=True)
            raise

        # 3. parse all sql files (aggregate text)
        all_text = ""
        for p in sql_files:
            with open(p, "r", encoding="utf-8", errors="ignore") as f:
                all_text += "\n" + f.read()

        # choose db_hint: prefer explicit hint param; else detect from CREATE TABLE schema (first one); else filename base
        db_hint = arango_db_hint or os.path.splitext(os.path.basename(file_name))[0]
        # parse CREATE TABLE -> schemas
        schemas = SQLParser.parse_create_tables(all_text, db_hint)
        # if parsed CREATE has fullnames with db, prefer that first parsed db as hint
        if schemas:
            first_table = next(iter(schemas.values()))
            if first_table.get("database"):
                db_hint = first_table["database"]

        # parse INSERTs
        inserts = SQLParser.parse_insert_statements(all_text, schemas)

        if not inserts:
            # nothing to insert, cleanup and exit
            shutil.rmtree(os.path.dirname(file_path), ignore_errors=True)
            raise HTTPException(status_code=400, detail="未在 SQL 中找到 INSERT 语句或无法解析")

        # target arango db for schema info (table-level relations stored in db_structure_{db_hint})
        arango_db_name = f"{db_hint}"#f"db_structure_{db_hint}"

        result = self.import_data_and_link(arango_db_name, schemas, inserts)

        # cleanup
        try:
            shutil.rmtree(os.path.dirname(file_path), ignore_errors=True)
        except Exception:
            pass

        return {
            "arango_db": arango_db_name,
            "tables_parsed": list({*schemas.keys(), *inserts.keys()}),
            **result
        }

    def process_file(self, file_path: str, file_name: str, arango_db_hint: Optional[str] = None) -> Dict[str, Any]:
        """
        Entry point when the file already exists locally: parse schemas and
        inserts and import them into ArangoDB.
        """
        # 1. handle zip or single sql file
        try:
            if file_path.lower().endswith(".zip"):
                sql_files = self.extract_zip(file_path)
            elif file_path.lower().endswith(".sql"):
                sql_files = [file_path]
            else:
                raise HTTPException(status_code=400, detail="仅支持 .sql 或 .zip 文件")
        except Exception:
            shutil.rmtree(os.path.dirname(file_path), ignore_errors=True)
            raise

        # 2. parse each file and merge the results
        all_schemas: Dict[str, Any] = {}
        all_inserts: Dict[str, List[Dict[str, Any]]] = {}

        for p in sql_files:
            with open(p, "r", encoding="utf-8", errors="ignore") as f:
                file_text = f.read()

                # parse schema
                schemas = SQLParser.parse_create_tables(file_text, "default")
                if not schemas:
                    continue
                # merge schemas (later files win on duplicate table names)
                for tbl, info in schemas.items():
                    all_schemas[tbl] = info

                # parse inserts
                inserts = SQLParser.parse_insert_statements(file_text, schemas)
                for tbl, rows in inserts.items():
                    all_inserts.setdefault(tbl, []).extend(rows)

        if not all_inserts:
            shutil.rmtree(os.path.dirname(file_path), ignore_errors=True)
            raise HTTPException(status_code=400, detail="未在 SQL 中找到 INSERT 语句或无法解析")

        # 3. decide db_hint (explicit hint > schema-declared db > file name)
        db_hint = arango_db_hint or os.path.splitext(os.path.basename(file_name))[0]
        if all_schemas:
            first_table = next(iter(all_schemas.values()))
            if first_table.get("database"):
                db_hint = first_table["database"]

        # 4. import into Arango
        arango_db_name = f"{db_hint}"
        result = self.import_data_and_link(arango_db_name, all_schemas, all_inserts)

        # cleanup temp dir
        try:
            shutil.rmtree(os.path.dirname(file_path), ignore_errors=True)
        except Exception:
            pass

        return {
            "arango_db": arango_db_name,
            "tables_parsed": list({*all_schemas.keys(), *all_inserts.keys()}),
            **result
        }

    async def analyze_sql_file(self, file_path: str) -> Dict[str, Any]:
        """
        Parse a SQL file, pick the database that owns the most tables, and
        return the names and descriptions of every database found.
        Returned format:
        {
          "db_name": "xxx",
          "db_description": "xxx",
          "total_tables": 12,
          "tables": ["t1", "t2", ...],
          "top10_tables": [
            {"table_name": "orders", "row_count": 1000, "column_count": 12},
            ...
          ],
          "all_databases": [
            {"name": "db1", "description": "desc1"},
            {"name": "db2", "description": "desc2"},
            ...
          ]
        }
        """
        # 1. handle zip or single sql file
        if file_path.lower().endswith(".zip"):
            sql_files = self.extract_zip(file_path)
        elif file_path.lower().endswith(".sql"):
            sql_files = [file_path]
        else:
            raise HTTPException(status_code=400, detail="仅支持 .sql 或 .zip 文件")

        # 2. parse every SQL file
        all_schemas = {}
        all_inserts = {}
        # all_text = ""
        db_descriptions = {}  # database name -> COMMENT text (or None)

        for p in sql_files:
            with open(p, "r", encoding="utf-8", errors="ignore") as f:
                file_text = f.read()
                # all_text += "\n" + file_text

                # extract per-database descriptions from this file;
                # unwrap MySQL /*!NNNNN ... */ version comments first
                clean_text = re.sub(r"/\*![0-9]+\s*(.*?)\s*\*/", r"\1", file_text)

                db_desc_matches = re.findall(
                    r"CREATE\s+DATABASE(?:\s+IF\s+NOT\s+EXISTS)?\s+`?([a-zA-Z0-9_]+)`?\s*(?:.*?COMMENT\s*=\s*'([^']*)')?",
                    clean_text,
                    re.I | re.S
                )

                for db_name, db_desc in db_desc_matches:
                    if db_name not in db_descriptions:
                        db_descriptions[db_name] = db_desc.strip() if db_desc else None
                # 3. parse schema and inserts
                schemas = SQLParser.parse_create_tables(file_text, "default")
                if not schemas:
                    continue
                inserts = SQLParser.parse_insert_statements(file_text, schemas)

                # merge schemas (later occurrences overwrite earlier ones)
                for tbl, info in schemas.items():
                    all_schemas[tbl] = info

                # merge inserts
                for tbl, rows in inserts.items():
                    all_inserts.setdefault(tbl, []).extend(rows)



        # 4. group tables by database
        db_to_tables: Dict[str, List[str]] = {}
        for tbl, info in all_schemas.items():
            dbname = info.get("database", "default")
            db_to_tables.setdefault(dbname, []).append(tbl)

        if not db_to_tables:
            raise HTTPException(status_code=400, detail="未识别到数据库")

        # 5. pick the database that owns the most tables
        db_name, tables = max(db_to_tables.items(), key=lambda kv: len(kv[1]))

        # 6. build info entries for every database found
        all_databases = []
        for db_name_in_file in db_to_tables.keys():
            all_databases.append({
                "name": db_name_in_file,
                "description": db_descriptions.get(db_name_in_file)
            })

        # 7. description of the main (table-richest) database
        db_description = db_descriptions.get(db_name)

        # 8. per-table statistics
        table_infos = []
        for tbl in tables:
            row_count = len(all_inserts.get(tbl, []))
            col_count = len(all_schemas.get(tbl, {}).get("columns", []))
            table_infos.append({
                "table_name": tbl,
                "row_count": row_count,
                "column_count": col_count,
            })

        # 9. sort by row count and keep the top 10
        top10 = sorted(table_infos, key=lambda x: x["row_count"], reverse=True)[:10]

        return {
            "db_name": db_name,
            "db_description": db_description,
            "total_tables": len(tables),
            "tables": tables,
            "top10_tables": top10,
            "all_databases": all_databases  # names and descriptions of all databases found
        }


async def main():
    """Ad-hoc local runner: analyze a SQL dump and print the summary."""
    importer = SQLDataImporter()

    # Point this at your own zip/sql file before running.
    dump_path = "schema.zip"

    summary = await importer.analyze_sql_file(dump_path)
    print(summary)


if __name__ == "__main__":
    asyncio.run(main())

# ---------- FastAPI endpoint ----------
# @app.post("/import-sql-data", response_model=ImportResponse)
# async def import_sql_data(req: ImportRequest = Body(...)):
#     svc = SQLDataImporter()
#     try:
#         res = await svc.process(req.file_url, req.file_name, req.arango_db_hint)
#         return ImportResponse(data=res)
#     except HTTPException as he:
#         logger.error(f"Import failed: {he.detail}")
#         raise he
#     except Exception as e:
#         logger.exception("Unexpected error during import")
#         return ImportResponse(status="error", message=str(e))
#
#
# # If you want to test locally as a script (not via FastAPI), you can add a small runner here.
# if __name__ == "__main__":
#     import uvicorn
#
#     uvicorn.run(app, host="0.0.0.0", port=80)
