import os
import re
import uuid
import datetime
import shutil
import zipfile
import logging
from typing import Dict, List, Optional, Tuple

import aiohttp
from fastapi import FastAPI, HTTPException, Body
from pydantic import BaseModel
from arango import ArangoClient
from arango.exceptions import ArangoError
from arango.database import StandardDatabase
from arango.graph import Graph as ArangoGraph
from starlette.middleware.cors import CORSMiddleware

from app.config import settings

# Logging configuration: root logger at INFO with timestamped lines.
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')
logger = logging.getLogger(__name__)

# FastAPI application for the SQL -> ArangoDB import service.
app = FastAPI(title="SQL导入ArangoDB服务", description="解析SQL文件并导入ArangoDB")

# Allow cross-origin requests from any origin.
# NOTE(review): browsers reject allow_origins=["*"] combined with
# allow_credentials=True for credentialed requests — confirm whether
# credentials are actually needed here.
app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)


# Request/response models
class SQLImportRequest(BaseModel):
    """Request body for the SQL-import endpoint."""
    file_url: str  # URL (or prefix-relative path) of the .sql/.zip file to download
    file_name: str  # original file name; its extension selects SQL vs ZIP handling
    graph_name: Optional[str] = "table_er_graph"  # ArangoDB graph to create/update
    db_name: Optional[str] = None  # optional custom database name
    # NOTE(review): db_name is not consumed by SQLToArangoDBService.process()
    # — the (commented-out) endpoint passes it anyway; reconcile before enabling.


class SQLImportResponse(BaseModel):
    """Uniform response envelope for the import endpoint."""
    status: str = "success"  # outcome flag; defaults to the success case
    message: Optional[str] = None  # human-readable detail on failure
    data: Optional[Dict] = None  # import statistics payload on success


class SQLToArangoDBService:
    def __init__(self):
        """Create the service with lazy ArangoDB connection state."""
        # The client is only constructed on first use (see _init_arango_client).
        self.client = None
        self.db = None
        self.db_mapping = {}  # logical database name -> ArangoDB database name
        # Make sure the scratch directory for downloads exists up front.
        os.makedirs(settings.temp_dir_root, exist_ok=True)

    def _init_arango_client(self):
        """Lazily build the ArangoDB client; raise HTTP 500 if it cannot be created."""
        if self.client:
            return  # already initialised, nothing to do
        try:
            self.client = ArangoClient(hosts=settings.arango_host)
            logger.info("ArangoDB客户端初始化成功")
        except Exception as e:
            logger.error(f"ArangoDB客户端初始化失败: {str(e)}")
            raise HTTPException(status_code=500, detail=f"无法连接到ArangoDB: {str(e)}")

    def _get_or_create_db(self, db_name: str) -> StandardDatabase:
        """Connect to `db_name`, creating it first (via _system) when missing."""
        self._init_arango_client()

        # Database creation is only possible through the special _system database.
        system = self.client.db('_system', username=settings.arango_user, password=settings.arango_password)
        if not system.has_database(db_name):
            system.create_database(db_name)
            logger.info(f"创建新数据库: {db_name}")

        # Open (and remember) a handle to the target database.
        self.db = self.client.db(db_name, username=settings.arango_user, password=settings.arango_password)
        logger.info(f"已连接到数据库: {db_name}")
        return self.db

    async def download_file(self, url: str, file_name: str) -> str:
        """Download `url` into a fresh temp directory and return the local path.

        Relative URLs (no http scheme) are prefixed with settings.dify_prefix
        when configured. Raises HTTPException(400) for a non-200 response and
        HTTPException(500) for any other download failure.
        """
        # Build the full URL: only prepend the prefix for relative, unprefixed paths.
        if settings.dify_prefix and not url.startswith("http") and not url.startswith(settings.dify_prefix):
            full_url = settings.dify_prefix.rstrip("/") + "/" + url.lstrip("/")
        else:
            full_url = url

        # One unique temp directory per download so concurrent requests never collide.
        timestamp = datetime.datetime.now().strftime("%Y%m%d_%H%M%S")
        unique_id = uuid.uuid4().hex[:8]
        temp_dir = os.path.join(settings.temp_dir_root, f"tmp_{timestamp}_{unique_id}")
        os.makedirs(temp_dir, exist_ok=True)

        # Fall back to a generated name when the caller supplied none.
        if not file_name:
            file_name = f"sql_import_{unique_id}.sql"
        file_path = os.path.join(temp_dir, file_name)

        try:
            async with aiohttp.ClientSession() as session:
                async with session.get(full_url) as resp:
                    if resp.status != 200:
                        logger.error(f"文件下载失败，URL: {full_url}，状态码: {resp.status}")
                        raise HTTPException(status_code=400, detail=f"文件下载失败，状态码: {resp.status}")
                    # Stream to disk in 1 MiB chunks to keep memory bounded.
                    with open(file_path, "wb") as f:
                        while True:
                            chunk = await resp.content.read(1024 * 1024)
                            if not chunk:
                                break
                            f.write(chunk)
            logger.info(f"文件下载成功，路径: {file_path}")
            return file_path
        except HTTPException:
            # BUG FIX: the 400 raised above used to be swallowed by the generic
            # handler below and re-raised as a 500; preserve the original status.
            shutil.rmtree(temp_dir, ignore_errors=True)
            raise
        except Exception as e:
            # Don't leak the temp directory when the download fails.
            shutil.rmtree(temp_dir, ignore_errors=True)
            logger.error(f"下载过程异常: {str(e)}")
            raise HTTPException(status_code=500, detail=f"文件下载失败: {str(e)}")

    def extract_zip(self, zip_path: str) -> List[str]:
        """Extract all .sql members of `zip_path` into a sibling `sql_file` dir.

        Returns the extracted SQL file paths. Raises HTTPException(400) for a
        corrupt archive or one containing no SQL files, HTTPException(500) for
        any other extraction error.
        """
        sql_files = []
        extract_dir = os.path.join(os.path.dirname(zip_path), "sql_file")
        os.makedirs(extract_dir, exist_ok=True)

        try:
            with zipfile.ZipFile(zip_path, 'r') as zip_ref:
                # Extract only .sql members; log and skip everything else.
                for member in zip_ref.namelist():
                    if member.lower().endswith('.sql'):
                        # BUG FIX: use extract()'s return value — it is the real
                        # (sanitized) on-disk path, which can differ from a naive
                        # os.path.join for absolute or '..'-containing names.
                        extracted_path = zip_ref.extract(member, extract_dir)
                        sql_files.append(extracted_path)
                        logger.info(f"提取文件: {extracted_path}")
                    else:
                        logger.warning(f"跳过非SQL文件: {member}")

            if not sql_files:
                logger.warning(f"ZIP文件中未找到SQL文件: {zip_path}")
                raise HTTPException(status_code=400, detail="ZIP文件中未找到SQL文件")

            return sql_files
        except zipfile.BadZipFile as e:
            logger.error(f"ZIP文件损坏: {str(e)}")
            raise HTTPException(status_code=400, detail=f"ZIP文件损坏: {str(e)}")
        except HTTPException:
            # BUG FIX: the 400 "no SQL files" error raised above was previously
            # caught by the generic handler below and converted into a 500.
            raise
        except Exception as e:
            logger.error(f"解压ZIP文件失败: {str(e)}")
            raise HTTPException(status_code=500, detail=f"解压ZIP文件失败: {str(e)}")

    def parse_sql(self, sql_content: str) -> Tuple[Dict[str, List[Dict]], List[Dict]]:
        """Parse SQL DDL text and return (databases, relationships).

        `databases` maps database name -> list of table dicts (name, comment,
        columns, database); `relationships` lists foreign-key links between
        columns/tables. Only CREATE DATABASE / USE / CREATE TABLE statements
        are inspected.
        """
        # Strip -- line comments and /* */ block comments first.
        sql_content = re.sub(r'--.*?$', '', sql_content, flags=re.MULTILINE)
        sql_content = re.sub(r'/\*.*?\*/', '', sql_content, flags=re.DOTALL)

        databases = {}
        current_db = None
        relationships = []

        # CREATE DATABASE [IF NOT EXISTS] `name`
        create_db_pattern = re.compile(
            r'CREATE\s+DATABASE\s+(?:IF\s+NOT\s+EXISTS\s+)?`?(\w+)`?',
            re.IGNORECASE
        )

        # USE `name`
        use_pattern = re.compile(
            r'USE\s+`?(\w+)`?',
            re.IGNORECASE
        )

        # CREATE TABLE [IF NOT EXISTS] `name` ( body ) [ENGINE...];
        create_table_pattern = re.compile(
            r'CREATE\s+TABLE\s+(?:IF\s+NOT\s+EXISTS\s+)?`?(\w+)`?\s*\((.*?)\)\s*(?:ENGINE.*?|)\s*;',
            re.IGNORECASE | re.DOTALL
        )

        # FOREIGN KEY (`col`) REFERENCES `table` (`col`)
        foreign_key_pattern = re.compile(
            r'FOREIGN\s+KEY\s*\(`?(\w+)`?\)\s+REFERENCES\s+`?(\w+)`?\s*\(`?(\w+)`?\)',
            re.IGNORECASE
        )

        # Register databases and remember the last USEd one.
        # NOTE: only the FINAL USE target sticks — all tables are attributed to
        # it. Scripts that interleave several USE statements should go through
        # parse_sql_group instead.
        for line in sql_content.split('\n'):
            db_match = create_db_pattern.search(line)
            if db_match:
                db_name = db_match.group(1)
                databases[db_name] = []
                logger.info(f"发现数据库: {db_name}")

            use_match = use_pattern.search(line)
            if use_match:
                current_db = use_match.group(1)
                logger.info(f"使用数据库: {current_db}")

        # Process each CREATE TABLE statement.
        for table_match in create_table_pattern.finditer(sql_content):
            table_name = table_match.group(1)
            table_body = table_match.group(2)

            # BUG FIX: search only this table's own DDL (group(0)) for its
            # COMMENT='...'; previously the whole script was searched, so every
            # table received the first COMMENT found anywhere in the file.
            table_comment_match = re.search(r"COMMENT\s*=\s*'([^']*)'", table_match.group(0), re.IGNORECASE)
            table_comment = table_comment_match.group(1) if table_comment_match else ""

            # One entry per non-empty line of the table body.
            lines = [line.strip().rstrip(",") for line in table_body.splitlines() if line.strip()]

            columns = []
            pk_columns = set()

            # First pass: collect PRIMARY KEY column names.
            for line in lines:
                pk_match = re.match(r'PRIMARY\s+KEY\s*\((.*?)\)', line, re.IGNORECASE)
                if pk_match:
                    pk_columns.update(c.strip(" `") for c in pk_match.group(1).split(","))

            # Second pass: parse column definitions, skipping constraint lines.
            for line in lines:
                if line.upper().startswith(("PRIMARY KEY", "FOREIGN KEY", "CONSTRAINT",
                                            "UNIQUE KEY", "KEY ")):
                    continue

                col_match = re.match(
                    r'`(?P<name>\w+)`\s+'
                    r'(?P<type>[A-Z]+(?:\(\d+\s*(?:,\s*\d+)?\))?)'  # allows DECIMAL(10, 2)
                    r'(?:\s+(?P<nullable>NOT NULL|NULL))?'
                    r'(?:\s+DEFAULT\s+(?P<default>(?:\'[^\']*\'|\w+|\([^)]*\))))?'
                    r'(?:\s+COMMENT\s+\'(?P<comment>[^\']*)\')?',
                    line,
                    re.IGNORECASE
                )
                if not col_match:
                    continue

                col_name = col_match.group("name")

                # Split "DECIMAL(10,2)" into base type, length and scale.
                type_match = re.match(r'([A-Z]+)(?:\((\d+)\s*(?:,\s*(\d+))?\))?',
                                      col_match.group("type"), re.IGNORECASE)
                col_type = type_match.group(1).upper()
                length = int(type_match.group(2)) if type_match.group(2) else None
                scale = int(type_match.group(3)) if type_match.group(3) else None

                columns.append({
                    "name": col_name,
                    "type": col_type,
                    "length": length,
                    "scale": scale,
                    "nullable": not (col_match.group("nullable") and col_match.group("nullable").upper() == "NOT NULL"),
                    "default": col_match.group("default"),
                    "comment": col_match.group("comment") or "",
                    "pk": col_name in pk_columns
                })

            # Foreign keys declared in this table's body.
            for fk_match in foreign_key_pattern.findall(table_body):
                relationships.append({
                    "source_database": current_db,
                    "source_table": table_name,
                    "source_column": fk_match[0],
                    "target_database": current_db,
                    "target_table": fk_match[1],
                    "target_column": fk_match[2]
                })

            table_data = {
                "name": table_name,
                "comment": table_comment,
                "columns": columns,
                "database": current_db
            }

            # BUG FIX: a USE without a preceding CREATE DATABASE previously
            # failed the `current_db in databases` test and silently dropped
            # every table; setdefault keeps them.
            if current_db:
                databases.setdefault(current_db, []).append(table_data)

        return databases, relationships

    def parse_sql_group(self, sql_content: str, filename: str) -> Tuple[Dict[str, List[Dict]], List[Dict]]:
        """Parse a group_*-style SQL script that may USE several databases.

        Unlike parse_sql, each table is attributed to the database whose USE
        statement precedes it in the file, while all tables are grouped under
        a single dict key: the base file name. Returns (databases,
        relationships) where `databases` is {base_filename: [table dicts]}.
        """
        # Strip -- line comments and /* */ block comments.
        sql_content = re.sub(r'--.*?$', '', sql_content, flags=re.MULTILINE)
        sql_content = re.sub(r'/\*.*?\*/', '', sql_content, flags=re.DOTALL)
        # Base file name (no directory, no extension) used as the grouping key.
        base_filename = os.path.splitext(os.path.basename(filename))[0] if filename else "default"

        databases = {base_filename: []}  # grouping key is the file name, not a real DB
        tables = []
        relationships = []

        # Matches USE statements.
        use_pattern = re.compile(
            r'USE\s+`?(\w+)`?',
            re.IGNORECASE
        )

        # Matches CREATE TABLE statements (body captured non-greedily).
        create_table_pattern = re.compile(
            r'CREATE\s+TABLE\s+(?:IF\s+NOT\s+EXISTS\s+)?`?(\w+)`?\s*\((.*?)\)\s*(?:ENGINE.*?|)\s*;',
            re.IGNORECASE | re.DOTALL
        )

        # Matches FOREIGN KEY (...) REFERENCES table(column).
        foreign_key_pattern = re.compile(
            r'FOREIGN\s+KEY\s*\(`?(\w+)`?\)\s+REFERENCES\s+`?(\w+)`?\s*\(`?(\w+)`?\)',
            re.IGNORECASE
        )

        # Split the script into per-USE blocks so each table can be attributed
        # to the database that was active where the table appears.
        lines = sql_content.split('\n')
        current_db = None
        db_blocks = []  # list of (database name, start line, end line) tuples

        # Locate every USE statement and the database it switches to.
        use_positions = []
        for i, line in enumerate(lines):
            use_match = use_pattern.search(line)
            if use_match:
                use_positions.append((i, use_match.group(1)))

        # No USE statements: nothing can be attributed; return the empty group.
        if not use_positions:
            return databases, relationships

        # Compute the line range each USE statement governs.
        for i, (line_num, db_name) in enumerate(use_positions):
            start_line = line_num
            if i < len(use_positions) - 1:
                end_line = use_positions[i + 1][0] - 1  # line before the next USE
            else:
                end_line = len(lines) - 1  # end of file

            db_blocks.append((db_name, start_line, end_line))

        # Process each CREATE TABLE statement.
        table_matches = create_table_pattern.finditer(sql_content)
        for table_match in table_matches:
            table_name = table_match.group(1)
            table_body = table_match.group(2)

            # Character offset of this table in the script...
            table_pos = table_match.start()

            # ...converted to a line number for block lookup.
            table_line = sql_content.count('\n', 0, table_pos)

            # Find which USE block this table falls into.
            table_db = None
            for db_name, start_line, end_line in db_blocks:
                if start_line <= table_line <= end_line:
                    table_db = db_name
                    break

            if table_db is None:
                # Table appears before the first USE: fall back to the first
                # USE'd database.
                table_db = db_blocks[0][0] if db_blocks else None

            if table_db is None:
                continue

            # Table-level COMMENT='...' inside this table's own DDL.
            table_comment_match = re.search(r"COMMENT\s*=\s*'([^']*)'", table_match.group(0), re.IGNORECASE)
            table_comment = table_comment_match.group(1) if table_comment_match else ""

            # One entry per non-empty line of the table body.
            body_lines = [line.strip().rstrip(",") for line in table_body.splitlines() if line.strip()]

            columns = []
            pk_columns = set()

            # First pass: collect PRIMARY KEY column names.
            for line in body_lines:
                pk_match = re.match(r'PRIMARY\s+KEY\s*\((.*?)\)', line, re.IGNORECASE)
                if pk_match:
                    cols = [c.strip(" `") for c in pk_match.group(1).split(",")]
                    pk_columns.update(cols)

            # Second pass: parse column definitions.
            for line in body_lines:
                if (line.upper().startswith("PRIMARY KEY") or
                        line.upper().startswith("FOREIGN KEY") or
                        line.upper().startswith("CONSTRAINT") or
                        line.upper().startswith("UNIQUE KEY") or
                        line.upper().startswith("KEY ")):
                    continue  # skip constraint/index definitions

                col_match = re.match(
                    r'`(?P<name>\w+)`\s+'
                    r'(?P<type>[A-Z]+(?:\(\d+\s*(?:,\s*\d+)?\))?)'
                    r'(?:\s+(?P<nullable>NOT NULL|NULL))?'
                    r'(?:\s+DEFAULT\s+(?P<default>(?:\'[^\']*\'|\w+|\([^)]*\))))?'
                    r'(?:\s+COMMENT\s+\'(?P<comment>[^\']*)\')?',
                    line,
                    re.IGNORECASE
                )
                if not col_match:
                    continue

                col_name = col_match.group("name")
                raw_type = col_match.group("type").upper()

                # Split the raw type into base type, length and scale.
                type_match = re.match(r'([A-Z]+)(?:\((\d+)\s*(?:,\s*(\d+))?\))?', col_match.group("type"),
                                      re.IGNORECASE)
                col_type = type_match.group(1).upper()
                length = int(type_match.group(2)) if type_match and type_match.group(2) else None
                scale = int(type_match.group(3)) if type_match and type_match.group(3) else None

                columns.append({
                    "name": col_name,
                    "type": col_type,
                    "length": length,
                    "scale": scale,
                    "nullable": not (col_match.group("nullable") and col_match.group("nullable").upper() == "NOT NULL"),
                    "default": col_match.group("default"),
                    "comment": col_match.group("comment") or "",
                    "pk": col_name in pk_columns
                })

            # Foreign keys declared in this table's body.
            fk_matches = foreign_key_pattern.findall(table_body)
            for fk_match in fk_matches:
                source_column = fk_match[0]
                target_table = fk_match[1]
                target_column = fk_match[2]

                # Resolve the target table's database from tables parsed so far.
                target_db = None
                for t in tables:
                    if t["name"] == target_table:
                        target_db = t["database"]
                        break

                if target_db is None:
                    # Target table not seen yet: assume it shares this table's DB.
                    target_db = table_db

                relationships.append({
                    "source_database": table_db,
                    "source_table": table_name,
                    "source_column": source_column,
                    "target_database": target_db,
                    "target_table": target_table,
                    "target_column": target_column
                })

            table_data = {
                "name": table_name,
                "comment": table_comment,
                "columns": columns,
                "database": table_db  # the table's actual (USE'd) database
            }
            tables.append(table_data)
            databases[base_filename].append(table_data)  # grouped under the file name

        return databases, relationships

    def parse_all_files(self, sql_content: str, filename: str) -> Tuple[Dict[str, List[Dict]], List[Dict]]:
        """Dispatch to the right parser: files named group_* use the
        USE-block-aware group parser, everything else the plain parser."""
        if not filename.startswith('group_'):
            return self.parse_sql(sql_content)
        return self.parse_sql_group(sql_content, filename)

    def create_or_update_graph(self, db: StandardDatabase, graph_name: str) -> ArangoGraph:
        """Ensure `graph_name` exists with the tables/columns ER edge definitions."""
        # Edge collections and the vertex collections they connect:
        # table -> its columns, column -> referenced column, table -> referenced table.
        edge_definitions = [
            {
                "edge_collection": "has_column_edges",
                "from_vertex_collections": ["tables"],
                "to_vertex_collections": ["columns"],
            },
            {
                "edge_collection": "foreign_key_edges",
                "from_vertex_collections": ["columns"],
                "to_vertex_collections": ["columns"],
            },
            {
                "edge_collection": "refers_to_edges",
                "from_vertex_collections": ["tables"],
                "to_vertex_collections": ["tables"],
            },
        ]

        if not db.has_graph(graph_name):
            # Fresh graph: create it with the full definition in one call.
            graph = db.create_graph(graph_name, edge_definitions=edge_definitions)
            logger.info(f"已创建新图: {graph_name}")
            return graph

        # Existing graph: make sure each edge collection exists, then force its
        # definition back to the expected shape.
        graph = db.graph(graph_name)
        for definition in edge_definitions:
            edge_name = definition["edge_collection"]
            if not db.has_collection(edge_name):
                db.create_collection(edge_name, edge=True)
                logger.info(f"创建新的边集合: {edge_name}")
            graph.replace_edge_definition(
                edge_collection=edge_name,
                from_vertex_collections=definition["from_vertex_collections"],
                to_vertex_collections=definition["to_vertex_collections"],
            )
        logger.info(f"已更新图: {graph_name}")
        return graph

    def _generate_key(self, *parts):
        """Join the given parts with ':' to form an ArangoDB document _key."""
        key = ":".join(parts)
        return key

    def import_to_arango(self, databases: Dict[str, List[Dict]], relationships: List[Dict], graph_name: str):
        """Import parsed table structures into ArangoDB.

        Each entry of `databases` becomes an ArangoDB database named
        db_structure_<name>, populated with table/column vertices and
        table->column, column->column (FK) and table->table edges inside
        `graph_name`. Returns True on success; raises HTTPException(500)
        on any ArangoDB error.
        """
        try:
            for db_name, tables in databases.items():
                if not tables:
                    logger.warning(f"数据库 {db_name} 没有表，跳过")
                    continue

                arango_db_name = f"db_structure_{db_name}"
                logger.info(f"处理数据库: {db_name}, ArangoDB数据库: {arango_db_name}")

                db = self._get_or_create_db(arango_db_name)

                # Ensure all vertex/edge collections exist (True == edge collection).
                collections = {
                    "tables": False,
                    "columns": False,
                    "has_column_edges": True,
                    "foreign_key_edges": True,
                    "refers_to_edges": True
                }
                existing = {c["name"] for c in db.collections()}
                for name, is_edge in collections.items():
                    if name in existing:
                        logger.info(f"Collection {name} already exists, skipping.")
                        continue
                    if is_edge:
                        db.create_collection(name, edge=True)
                    else:
                        db.create_collection(name)
                    logger.info(f"Collection {name} created.")

                self.create_or_update_graph(db, graph_name)

                tables_col = db.collection("tables")
                columns_col = db.collection("columns")
                has_col_edge = db.collection("has_column_edges")
                fk_edge = db.collection("foreign_key_edges")
                refers_edge = db.collection("refers_to_edges")

                table_key_map = {}   # table name -> tables/_id
                column_key_map = {}  # (table name, column name) -> columns/_id

                # Insert table and column vertices plus the table->column edges.
                for table in tables:
                    # The table's real database (falls back to the dict key).
                    current_db_name = table.get("database", db_name) or db_name
                    table_key = self._generate_key(current_db_name, table["name"])
                    table_doc = tables_col.insert({
                        "_key": table_key,
                        "name": table["name"],
                        "comment": table.get("comment", ""),
                        "database": current_db_name
                    }, overwrite=True)
                    table_id = table_doc["_id"]
                    table_key_map[table["name"]] = table_id

                    for col in table["columns"]:
                        column_key = self._generate_key(current_db_name, table["name"], col["name"])
                        col_doc = columns_col.insert({
                            "_key": column_key,
                            "name": col["name"],
                            "type": col["type"],
                            "comment": col.get("comment", ""),
                            "pk": col.get("pk", False),
                            # BUG FIX: was hard-coded to db_name, which disagreed
                            # with the table document for group_* inputs where the
                            # dict key is the file name, not the real database.
                            "database": current_db_name,
                            "table": table["name"],
                            "length": col["length"],
                            "decimal": col["scale"],
                            "nullable": col["nullable"],
                            "default": col["default"]
                        }, overwrite=True)
                        col_id = col_doc["_id"]
                        column_key_map[(table["name"], col["name"])] = col_id

                        # table -> column membership edge ("/" is illegal in _key).
                        edge_key = f"hascol:{table_id}--{col_id}".replace("/", "_")
                        has_col_edge.insert({
                            "_from": table_id,
                            "_to": col_id,
                            "_key": edge_key,
                        }, overwrite=True)

                # Relationships whose endpoints live in this database. Parsers
                # record either the dict key (plain files) or the USE'd database
                # (group_* files), so accept both.
                # BUG FIX: comparing only against db_name silently skipped every
                # foreign key parsed from group_* files.
                local_dbs = {db_name} | {t.get("database") for t in tables if t.get("database")}
                db_relationships = [
                    r for r in relationships
                    if r["source_database"] in local_dbs and r["target_database"] in local_dbs
                ]

                for rel in db_relationships:
                    # Column-to-column FK edge (skipped if either side is unknown).
                    from_col_id = column_key_map.get((rel["source_table"], rel["source_column"]))
                    to_col_id = column_key_map.get((rel["target_table"], rel["target_column"]))

                    if from_col_id and to_col_id:
                        edge_key = f"fk:{from_col_id}--{to_col_id}".replace("/", "_")
                        fk_edge.insert({
                            "_from": from_col_id,
                            "_to": to_col_id,
                            "_key": edge_key,
                        }, overwrite=True)

                    # Table-to-table reference edge.
                    from_table_id = table_key_map.get(rel["source_table"])
                    to_table_id = table_key_map.get(rel["target_table"])

                    if from_table_id and to_table_id:
                        edge_key = f"refers:{from_table_id}--{to_table_id}".replace("/", "_")
                        refers_edge.insert({
                            "_from": from_table_id,
                            "_to": to_table_id,
                            "_key": edge_key,
                        }, overwrite=True)

                logger.info(f"成功导入数据库 {db_name}: {len(tables)} 个表, {len(db_relationships)} 个关系")

            return True
        except ArangoError as e:
            logger.error(f"ArangoDB导入失败: {str(e)}")
            raise HTTPException(status_code=500, detail=f"数据库导入失败: {str(e)}")

    def process_file(self, file_path: str, graph_name: str) -> Dict:
        """Parse a local SQL/ZIP file, import it into ArangoDB and return stats.

        The parent directory of `file_path` is treated as a disposable temp
        directory and removed (best-effort) after a successful import.
        """
        # Collect the SQL files to parse; a ZIP may contain several.
        if file_path.lower().endswith('.zip'):
            logger.info(f"处理ZIP文件: {file_path}")
            sql_paths = self.extract_zip(file_path)
        else:
            logger.info(f"处理SQL文件: {file_path}")
            sql_paths = [file_path]

        merged_databases = {}
        merged_relationships = []

        # Parse every file, merging results keyed by database name.
        for sql_file in sql_paths:
            try:
                with open(sql_file, 'r', encoding='utf-8') as handle:
                    content = handle.read()
                parsed_dbs, parsed_rels = self.parse_all_files(content, os.path.basename(sql_file))

                for key, tbls in parsed_dbs.items():
                    merged_databases.setdefault(key, []).extend(tbls)
                merged_relationships.extend(parsed_rels)

                logger.info(
                    f"解析文件成功: {sql_file}, 数据库数量: {len(parsed_dbs)}, "
                    f"表数量: {sum(len(t) for t in parsed_dbs.values())}, 关系数量: {len(parsed_rels)}"
                )
            except Exception as e:
                logger.error(f"解析SQL文件失败: {sql_file}, 错误: {str(e)}")
                raise HTTPException(status_code=400, detail=f"SQL文件解析失败: {str(e)}")

        if not merged_databases:
            logger.error("未找到有效的数据库和表数据")
            raise HTTPException(status_code=400, detail="未找到有效的数据库和表数据")

        self.import_to_arango(merged_databases, merged_relationships, graph_name)

        # Best-effort cleanup of the temp directory holding the source file.
        work_dir = os.path.dirname(file_path)
        try:
            shutil.rmtree(work_dir)
            logger.info(f"临时文件已清理: {work_dir}")
        except Exception as e:
            logger.warning(f"临时文件清理失败: {str(e)}")

        # Summary statistics for the response payload.
        return {
            "databases": {name: len(tbls) for name, tbls in merged_databases.items()},
            "tables": sum(len(t) for t in merged_databases.values()),
            "relations": len(merged_relationships),
            "graph": graph_name
        }

    async def process(self, file_url: str, file_name: str, graph_name: str) -> Dict:
        """Full pipeline: validate the file type, download the file, then parse
        and import it via process_file.

        Raises HTTPException(400) for unsupported file extensions.
        (Cleanliness fix: the original had a second, stray triple-quoted string
        statement mid-function; merged into this single docstring.)
        """
        if not file_name.lower().endswith(('.sql', '.zip')):
            raise HTTPException(status_code=400, detail="仅支持SQL或ZIP格式的文件")
        file_path = await self.download_file(file_url, file_name)
        return self.process_file(file_path, graph_name)

    async def get_database_info(self, file_path: str) -> Dict:
        """Parse a local SQL/ZIP file and return database/table statistics
        WITHOUT importing anything into ArangoDB.

        Returns a dict with the full parsed databases, counts, and a preview
        of at most 10 tables per database. Raises HTTPException(400) when a
        file cannot be parsed.
        """
        # Collect the SQL files to parse; a ZIP may contain several.
        if file_path.lower().endswith('.zip'):
            logger.info(f"处理ZIP文件: {file_path}")
            sql_files = self.extract_zip(file_path)
        else:
            logger.info(f"处理SQL文件: {file_path}")
            sql_files = [file_path]

        all_databases = {}
        all_relationships = []

        for sql_file in sql_files:
            try:
                with open(sql_file, 'r', encoding='utf-8') as file:
                    sql_content = file.read()
                # CONSISTENCY FIX: dispatch on the file name like process_file
                # does, so group_* files are handled by the group-aware parser
                # instead of always using parse_sql (which returned empty
                # results for them).
                databases, relationships = self.parse_all_files(sql_content, os.path.basename(sql_file))

                # Merge parsed databases and relationships.
                for db_name, tables in databases.items():
                    all_databases.setdefault(db_name, []).extend(tables)

                all_relationships.extend(relationships)
                logger.info(
                    f"解析文件成功: {sql_file}, 数据库数量: {len(databases)}, "
                    f"表数量: {sum(len(t) for t in databases.values())}, "
                    f"关系数量: {len(relationships)}"
                )
            except Exception as e:
                logger.error(f"解析SQL文件失败: {sql_file}, 错误: {str(e)}")
                raise HTTPException(status_code=400, detail=f"SQL文件解析失败: {str(e)}")

        # Summary statistics.
        database_count = len(all_databases)
        table_count = sum(len(tables) for tables in all_databases.values())

        # Keep at most the first 10 tables per database as a preview.
        sample_tables = {
            db_name: tables[:10] for db_name, tables in all_databases.items()
        }

        return {
            "databases": all_databases,
            "database_count": database_count,
            "table_count": table_count,
            "sample_tables": sample_tables
        }
# FastAPI endpoint (commented out). NOTE(review): as written it passes
# db_name= to service.process(), which does not accept that parameter —
# reconcile the signatures before re-enabling this handler.
# @app.post("/import/sql_to_arango", response_model=SQLImportResponse)
# async def import_sql_to_arango(request_data: SQLImportRequest = Body(...)):
#     service = SQLToArangoDBService()
#     try:
#         result = await service.process(
#             file_url=request_data.file_url,
#             file_name=request_data.file_name,
#             graph_name=request_data.graph_name,
#             db_name=request_data.db_name
#         )
#         return SQLImportResponse(data=result)
#     except Exception as e:
#         logger.error(f"SQL导入失败: {str(e)}", exc_info=True)
#         return SQLImportResponse(
#             status="error",
#             message=str(e)
#         )
#
#
# # 本地测试
# if __name__ == "__main__":
#     import uvicorn
#
#     uvicorn.run(app, host="0.0.0.0", port=8000)

# 本地测试
# if __name__ == "__main__":
#     # 设置本地测试配置
#     class TestSettings:
#         temp_dir_root = "D:\PythonProjects\doc_convert_v2/app\convert_to_yaml/tmp\sql_processing_20250911_153749_cf2b0793\merged"
#         arango_host = "http://localhost:8529"  # 修改为你的ArangoDB地址
#         arango_user = "root"  # 修改为你的ArangoDB用户名
#         arango_password = "password"  # 修改为你的ArangoDB密码
#         dify_prefix = ""
#
#
#     # 替换默认设置
#     settings = TestSettings()
#
#     # 创建服务实例
#     service = SQLToArangoDBService()
#
#     # 设置本地文件路径（修改为你的SQL文件路径）
#     local_file_path = "D:\PythonProjects\doc_convert_v2/app\convert_to_yaml/tmp\sql_processing_20250911_153749_cf2b0793\merged\group_cif.sql"  # 或者ZIP文件路径
#
#     # 设置图名称
#     graph_name = "test_graph"
#
#     try:
#         # 直接调用process_file方法
#         result = service.process_file(local_file_path, graph_name)
#         print("导入成功！")
#         print(f"结果: {result}")
#     except Exception as e:
#         print(f"导入失败: {str(e)}")
#         import traceback
#
#         traceback.print_exc()