# NOTE(review): this entire module is commented out (dead code). Either delete it
# or restore it under version control; keeping it disabled in-tree invites drift.
# import os
# import re
# import uuid
# import datetime
# import shutil
# import aiohttp
# import neo4j
# from fastapi import FastAPI, HTTPException
# from neo4j import GraphDatabase
# from pydantic import BaseModel
# from typing import Dict, List, Tuple, Optional
# import logging
#
# from app.config import settings
# import sqlparse
# from sqlparse.sql import Identifier, Parenthesis, Statement
# from sqlparse.tokens import Keyword
#
#
# # 配置日志
# logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')
# logger = logging.getLogger(__name__)
#
# # 初始化FastAPI应用
# app = FastAPI(title="ER图生成接口服务", description="解析SQL并导入Neo4j（含完整字段属性）")
#
#
# # 请求/响应模型
# class ERGenerateRequest(BaseModel):
#     sql_url: str
#     er_database: str
#
#
# class ERGenerateResponse(BaseModel):
#     status: str = "success"
#     message: Optional[str] = None
#     data: Optional[Dict] = None
#
#
# class SQLToNeo4jService:
#     def __init__(self):
#         os.makedirs(settings.temp_dir_root, exist_ok=True)
#         self.driver = None
#
#     def _init_neo4j_driver(self):
#         if not self.driver:
#             try:
#                 self.driver = GraphDatabase.driver(
#                     settings.neo4j_uri,
#                     auth=(settings.neo4j_user, settings.neo4j_password)
#                 )
#                 self.driver.verify_connectivity()
#                 logger.info("Neo4j驱动初始化成功")
#             except Exception as e:
#                 logger.error(f"Neo4j驱动初始化失败: {str(e)}")
#                 raise HTTPException(status_code=500, detail=f"无法连接到Neo4j: {str(e)}")
#
#     async def download_sql_file(self, url: str) -> str:
#         # 保持不变（文件下载逻辑）
#         # NOTE(review): latent bug — url.startswith(settings.dify_prefix) is evaluated
#         # before the truthiness check, so a None dify_prefix raises TypeError.
#         # Test the guard first: `if settings.dify_prefix and not url.startswith(...)`.
#         if not url.startswith(settings.dify_prefix) and settings.dify_prefix:
#             full_url = settings.dify_prefix.rstrip("/") + "/" + url.lstrip("/")
#         else:
#             full_url = url
#
#         timestamp = datetime.datetime.now().strftime("%Y%m%d_%H%M%S")
#         unique_id = uuid.uuid4().hex[:8]
#         temp_dir = os.path.join(settings.temp_dir_root, f"tmp_{timestamp}_{unique_id}")
#         os.makedirs(temp_dir, exist_ok=True)
#
#         filename = url.split("/")[-1]
#         if not filename.endswith(".sql"):
#             filename = f"er_import_{unique_id}.sql"
#         file_path = os.path.join(temp_dir, filename)
#
#         try:
#             async with aiohttp.ClientSession() as session:
#                 async with session.get(full_url) as resp:
#                     if resp.status != 200:
#                         logger.error(f"SQL文件下载失败，URL: {full_url}，状态码: {resp.status}")
#                         raise HTTPException(status_code=400, detail=f"SQL文件下载失败，状态码: {resp.status}")
#                     with open(file_path, "wb") as f:
#                         while True:
#                             chunk = await resp.content.read(1024 * 1024)
#                             if not chunk:
#                                 break
#                             f.write(chunk)
#             logger.info(f"SQL文件下载成功，路径: {file_path}")
#             return file_path
#         except Exception as e:
#             logger.error(f"下载过程异常: {str(e)}")
#             raise HTTPException(status_code=500, detail=f"文件下载失败: {str(e)}")
#
#     # def parse_sql_relations(self, sql_file_path: str) -> Tuple[List[Dict], List[Dict]]:
#     #     """解析SQL文件，提取表名、字段信息和表关系（外键）"""
#     #     try:
#     #         with open(sql_file_path, "r", encoding="utf-8") as f:
#     #             sql_content = f.read()
#     #     except Exception as e:
#     #         logger.error(f"读取SQL文件失败: {str(e)}")
#     #         raise HTTPException(status_code=500, detail=f"读取SQL文件失败: {str(e)}")
#     #
#     #     # 预处理SQL（移除注释和多余空格）
#     #     sql_content = re.sub(r'/\*.*?\*/', '', sql_content, flags=re.DOTALL)
#     #     sql_content = re.sub(r'--.*?$', '', sql_content, flags=re.MULTILINE)
#     #     sql_content = re.sub(r'\s+', ' ', sql_content).strip()
#     #
#     #     tables = []
#     #     relations = []
#     #     statements = sqlparse.parse(sql_content)
#     #
#     #     for statement in statements:
#     #         if not statement.tokens:
#     #             continue
#     #
#     #         # 识别CREATE TABLE语句
#     #         create_tokens = [t for t in statement.tokens if
#     #                          isinstance(t, sqlparse.sql.Token) and
#     #                          t.value.upper() in ('CREATE', 'TABLE')]
#     #         if len(create_tokens) < 2:
#     #             continue
#     #
#     #         # 提取表名（保留原始逻辑）
#     #         table_name = None
#     #         for token in statement.tokens:
#     #             if isinstance(token, Identifier):
#     #                 table_name = token.value.strip('`')
#     #                 break
#     #         if not table_name:
#     #             continue
#     #
#     #         # 解析表字段（核心优化）
#     #         fields = ""
#     #         table_body = None
#     #         # 定位表结构的括号内容（字段定义在()内）
#     #         for token in statement.tokens:
#     #             if isinstance(token, Parenthesis):
#     #                 table_body = token
#     #                 break
#     #         if not table_body:
#     #             tables.append({"name": table_name, "fields": fields})
#     #             continue
#     #
#     #         # 字段定义通常是用逗号分隔的，所以我们先分割
#     #         field_definitions = []
#     #         current_definition = []
#     #         paren_depth = 0
#     #
#     #         for token in table_body.tokens:
#     #             if isinstance(token, sqlparse.sql.Token) and token.value == '(':
#     #                 paren_depth += 1
#     #             elif isinstance(token, sqlparse.sql.Token) and token.value == ')':
#     #                 paren_depth -= 1
#     #
#     #             if isinstance(token, sqlparse.sql.Token) and token.value == ',' and paren_depth == 0:
#     #                 if current_definition:
#     #                     field_definitions.append(' '.join([str(t) for t in current_definition]).strip())
#     #                     current_definition = []
#     #             else:
#     #                 current_definition.append(token)
#     #
#     #         if current_definition:
#     #             field_definitions.append(' '.join([str(t) for t in current_definition]).strip())
#     #
#     #         # 处理每个字段定义
#     #         for field_def in field_definitions:
#     #             # if not field_def or field_def.upper().startswith('PRIMARY KEY') or field_def.upper().startswith(
#     #             #         'UNIQUE'):
#     #             #     continue
#     #             #
#     #             # # 提取字段名
#     #             # col_name_match = re.match(r'`?([^`\s]+)`?', field_def)
#     #             # if not col_name_match:
#     #             #     continue
#     #             # col_name = col_name_match.group(1)
#     #             #
#     #             # # 提取数据类型（优化：更精确的类型匹配）
#     #             # type_pattern = r'(?i)' + re.escape(col_name) + r'\s+([a-z]+(?:\([^)]+\))?(?:\s+(?:unsigned|zerofill))?)'
#     #             # col_type_match = re.search(type_pattern, field_def)
#     #             # col_type = col_type_match.group(1).strip() if col_type_match else "UNKNOWN"
#     #             #
#     #             # # 提取非空约束
#     #             # is_not_null = 'NOT NULL' in field_def.upper()
#     #             #
#     #             # # 提取主键约束
#     #             # is_primary = 'PRIMARY KEY' in field_def.upper()
#     #             #
#     #             # # 提取默认值
#     #             # default_match = re.search(r'(?i)DEFAULT\s+([^\s,]+)', field_def)
#     #             # default_value = default_match.group(1) if default_match else None
#     #             #
#     #             # # 提取注释
#     #             # comment_match = re.search(r'(?i)COMMENT\s+[\'"]([^\'"]*)[\'"]', field_def)
#     #             # comment = comment_match.group(1) if comment_match else ""
#     #             #
#     #             # # 提取自增属性
#     #             # is_auto_increment = 'AUTO_INCREMENT' in field_def.upper()
#     #
#     #             # NOTE(review): `fields` is reassigned each iteration, so only the LAST
#     #             # column definition survives; the disabled `.append` below suggests a
#     #             # list of field dicts was intended — fix before re-enabling.
#     #             fields = field_def#.append({
#     #             #     "name": col_name,
#     #             #     "type": col_type,
#     #             #     "primary_key": is_primary,
#     #             #     "not_null": is_not_null,
#     #             #     "default": default_value,
#     #             #     "comment": comment,
#     #             #     "auto_increment": is_auto_increment
#     #             # })
#     #             # logger.debug(
#     #             #     f"解析字段: {table_name}.{col_name} → 类型: {col_type}, 主键: {is_primary}, 非空: {is_not_null}")
#     #
#     #         # 补充处理单独定义的主键约束
#     #         for field_def in field_definitions:
#     #             if field_def.upper().startswith('PRIMARY KEY'):
#     #                 pk_pattern = re.compile(r'PRIMARY\s+KEY\s*\(\s*`?([^`]+)`?\s*\)', re.IGNORECASE)
#     #                 pk_match = pk_pattern.search(field_def)
#     #                 if pk_match:
#     #                     pk_col = pk_match.group(1).strip()
#     #                     for field in fields:
#     #                         if field["name"] == pk_col:
#     #                             field["primary_key"] = True
#     #                             break
#     #
#     #         tables.append({"name": table_name, "fields": fields})
#     #
#     #         # 提取外键关系
#     #         fk_pattern = re.compile(
#     #             r'FOREIGN\s+KEY\s*\(\s*`?([^`]+)`?\s*\)\s+REFERENCES\s+`?([^`]+)`?\s*\(\s*`?([^`]+)`?\s*\)',
#     #             re.IGNORECASE
#     #         )
#     #         for match in fk_pattern.finditer(str(statement)):
#     #             fk_column = match.group(1).strip()
#     #             target_table = match.group(2).strip()
#     #             target_column = match.group(3).strip()
#     #
#     #             relations.append({
#     #                 "source_table": table_name,
#     #                 "target_table": target_table,
#     #                 "source_column": fk_column,
#     #                 "target_column": target_column,
#     #                 "foreign_key": fk_column
#     #             })
#     #             logger.info(f"发现关系: {table_name}.{fk_column} → {target_table}.{target_column}")
#     #
#     #     if not tables:
#     #         raise HTTPException(status_code=400, detail="未发现有效表结构")
#     #
#     #     logger.info(f"成功解析 {len(tables)} 张表，{len(relations)} 个关系")
#     #     return tables, relations
#     #
#     # def generate_cypher(self, tables: List[Dict], relations: List[Dict]) -> List[str]:
#     #     """生成Cypher语句（确保fields正确存入数据库）"""
#     #     cypher = []
#     #
#     #     for table in tables:
#     #         table_name = table["name"]
#     #         # 为每个表生成独立标签（首字母大写）
#     #         label = table_name.capitalize()
#     #
#     #         # 处理fields为Neo4j可识别的列表格式
#     #         # fields_list = []
#     #         # for field in table["fields"]:
#     #         #     # 转义字符串中的双引号和反斜杠
#     #         #     field_name = field["name"].replace('\\', '\\\\').replace('"', '\\"')
#     #         #     field_type = field["type"].replace('\\', '\\\\').replace('"', '\\"')
#     #         #     field_comment = field["comment"].replace('\\', '\\\\').replace('"', '\\"')
#     #         #
#     #         #     # 处理默认值（如果是字符串需要加引号）
#     #         #     default_value = field["default"]
#     #         #     if default_value is not None:
#     #         #         if isinstance(default_value, str):
#     #         #             # NOTE(review): reusing the same quote type inside an f-string
#     #         #             # expression is a SyntaxError before Python 3.12 — rewrite before re-enabling.
#     #         #             default_value = f'"{default_value.replace("\\", "\\\\").replace('"', '\\"')}"'
#     #         #     else:
#     #         #         default_value = "null"
#     #         #
#     #         #     fields_list.append(
#     #         #         f'{{name: "{field_name}", type: "{field_type}", '
#     #         #         f'primary_key: {str(field["primary_key"]).lower()}, '
#     #         #         f'not_null: {str(field["not_null"]).lower()}, '
#     #         #         f'default: {default_value}, '
#     #         #         f'comment: "{field_comment}", '
#     #         #         f'auto_increment: {str(field["auto_increment"]).lower()}}}'
#     #         #     )
#     #
#     #         fields_str = table["fields"] #", ".join(fields_list)
#     #
#     #         # 创建节点（带独立标签和完整fields属性）
#     #         cypher_stmt = (
#     #             f'MERGE (t:{label} {{name: "{table_name}", fields: "{fields_str}"}})'
#     #         )
#     #         cypher.append(cypher_stmt)
#     #         logger.debug(f"生成表节点Cypher: {cypher_stmt[:100]}...")
#     #
#     #     # 生成关系Cypher
#     #     for rel in relations:
#     #         source_label = rel["source_table"].capitalize()
#     #         target_label = rel["target_table"].capitalize()
#     #
#     #         cypher.append(
#     #             f'MERGE (s:{source_label} {{name: "{rel["source_table"]}"}})\n'
#     #             f'MERGE (t:{target_label} {{name: "{rel["target_table"]}"}})\n'
#     #             f'MERGE (s)-[r:REFERENCES {{'
#     #             f'foreign_key: "{rel["foreign_key"]}", '
#     #             f'source_column: "{rel["source_column"]}", '
#     #             f'target_column: "{rel["target_column"]}"'
#     #             f'}}]->(t)'
#     #         )
#     #
#     #     return cypher
#     #
#     # def import_to_neo4j(self, database: str, cypher_statements: List[str]) -> Dict:
#     #     """导入到Neo4j（增强错误处理）"""
#     #     self._init_neo4j_driver()
#     #     success_count = 0
#     #     fail_count = 0
#     #     fail_details = []
#     #
#     #     with self.driver.session(database=database) as session:
#     #         for i, stmt in enumerate(cypher_statements):
#     #             try:
#     #                 # 打印正在执行的Cypher语句（仅调试模式）
#     #                 logger.debug(f"执行Cypher语句 ({i + 1}/{len(cypher_statements)}): {stmt[:100]}...")
#     #
#     #                 # 执行Cypher语句
#     #                 result = session.execute_write(self._run_cypher, stmt)
#     #                 success_count += 1
#     #
#     #                 # 记录执行结果（仅调试模式）
#     #                 logger.debug(f"Cypher执行成功，结果: {result}")
#     #             except neo4j.exceptions.Neo4jError as e:
#     #                 fail_count += 1
#     #                 fail_details.append({
#     #                     "statement": stmt,
#     #                     "error_type": e.__class__.__name__,
#     #                     "message": str(e)
#     #                 })
#     #                 logger.error(f"Cypher执行失败: {str(e)}")
#     #                 logger.error(f"失败语句: {stmt}")
#     #             except Exception as e:
#     #                 fail_count += 1
#     #                 fail_details.append({
#     #                     "statement": stmt,
#     #                     "error_type": "GeneralError",
#     #                     "message": str(e)
#     #                 })
#     #                 logger.error(f"执行异常: {str(e)}")
#     #                 logger.error(f"异常语句: {stmt}")
#     #
#     #     if fail_count > 0:
#     #         logger.warning(f"Neo4j导入完成: 成功={success_count}, 失败={fail_count}")
#     #     else:
#     #         logger.info(f"Neo4j导入全部成功: {success_count} 条语句")
#     #
#     #     return {
#     #         "total": len(cypher_statements),
#     #         "success": success_count,
#     #         "fail": fail_count,
#     #         "fail_details": fail_details
#     #     }
#     #
#     # @staticmethod
#     # def _run_cypher(tx, cypher: str):
#     #     result = tx.run(cypher)
#     #     return [record for record in result]
#
#     def load_sql_file(self, sql_file_path: str)-> str:
#         """解析SQL文件，提取表名、字段信息和表关系（外键）"""
#
#         with open(sql_file_path, 'r', encoding='utf-8') as f:
#             return f.read()
#
#     def write_to_neo4j(self, tables: dict, foreign_keys: list, database: str):
#         self._init_neo4j_driver()
#         if not database:
#             database = "neo4j"
#         with self.driver.session(database=database) as session:
#             session.run("""
#                 CREATE CONSTRAINT table_name_unique IF NOT EXISTS
#                 FOR (t:Table)
#                 REQUIRE t.name IS UNIQUE
#             """)
#             session.run("""
#                 CREATE CONSTRAINT field_full_name_unique IF NOT EXISTS
#                 FOR (f:Field)
#                 REQUIRE f.full_name IS UNIQUE
#             """)
#
#             for table_name, fields in tables.items():
#                 session.run("MERGE (t:Table {name: $name})", name=table_name)
#                 for field in fields:
#                     field_name = field["name"]
#                     field_type = field["type"]
#                     length = field["length"]
#                     decimal_length = field["decimal_length"]
#                     nullable = field["nullable"]
#                     is_primary = field["is_primary_key"]
#                     comment = field["comment"]
#
#                     full_name = f"{table_name}.{field_name}"
#                     session.run("""
#                         MERGE (f:Field {full_name: $full_name})
#                         SET f.name = $field_name, f.type = $type, f.comment = $comment,f.length = $length, f.decimal_length = $decimal_length, f.nullable = $nullable, f.is_primary = $is_primary
#                     """, full_name=full_name, field_name=field_name, type=field_type, comment=comment, length=length,
#                                 decimal_length=decimal_length, nullable=nullable, is_primary=is_primary)
#                     session.run("""
#                         MATCH (t:Table {name: $table}), (f:Field {full_name: $full_name})
#                         MERGE (t)-[:HAS_COLUMN]->(f)
#                     """, table=table_name, full_name=full_name)
#
#             for from_table, from_col, to_table, to_col in foreign_keys:
#                 from_key = f"{from_table}.{from_col}"
#                 to_key = f"{to_table}.{to_col}"
#
#                 session.run("""
#                     MATCH (f1:Field {full_name: $from_key}), (f2:Field {full_name: $to_key})
#                     MERGE (f1)-[:FOREIGN_KEY]->(f2)
#                 """, from_key=from_key, to_key=to_key)
#
#                 # 加上表之间的引用
#                 session.run("""
#                     MATCH (t1:Table {name: $from_table}), (t2:Table {name: $to_table})
#                     MERGE (t1)-[:REFERS_TO_TABLE]->(t2)
#                 """, from_table=from_table, to_table=to_table)
#
#         # NOTE(review): closing here leaves self.driver pointing at a CLOSED driver;
#         # _init_neo4j_driver only re-creates when self.driver is falsy, so a second
#         # process() call would fail. Either set self.driver = None after close(),
#         # or keep the driver open for the service's lifetime and close on shutdown.
#         self.driver.close()
#
#     def extract_table_blocks(self,sql_text: str) -> List[str]:
#         # 兼容 CREATE TABLE table_name (...) ENGINE=...; 不管有没有反引号
#         pattern = r'CREATE TABLE\s+(?:`?\w+`?)\s*\(.*?\)\s*ENGINE=.*?;'
#         return re.findall(pattern, sql_text, re.S | re.I)
#
#     def parse_table_structure(self,sql_block: str) -> Tuple[str, List[Dict], List[Tuple[str, str, str, str]]]:
#         table_name_match = re.search(r'CREATE TABLE\s+`?(\w+)`?', sql_block, re.IGNORECASE)
#         table_name = table_name_match.group(1) if table_name_match else 'UNKNOWN'
#
#         lines = sql_block.splitlines()
#         fields = []
#         fks = []
#         primary_keys = set()
#
#         # 提前找 PRIMARY KEY 字段名（避免字段定义顺序限制）
#         pk_match = re.search(r'PRIMARY KEY\s*\((.*?)\)', sql_block, re.IGNORECASE)
#         if pk_match:
#             pk_fields = pk_match.group(1)
#             pk_fields = [col.strip(' `') for col in pk_fields.split(',')]
#             primary_keys.update(pk_fields)
#
#         for line in lines[1:]:
#             line = line.strip().rstrip(',')
#
#             # ✅ 匹配字段定义：`name` TYPE[(len[,scale])] [NOT NULL] ... COMMENT 'xxx'
#             # NOTE(review): the optional (?P<null>NOT NULL|NULL) group sits after a lazy
#             # `.*?`, so it matches empty in virtually every case — NOT NULL is never
#             # captured and `nullable` below is effectively always True. Anchor the
#             # nullability (and COMMENT) captures, or search for them separately.
#             col_match = re.match(
#                 r'^`(?P<name>\w+)`\s+(?P<type>\w+)(?:\((?P<length>\d+)(?:,\s*(?P<scale>\d+))?\))?.*?(?P<null>NOT NULL|NULL)?(?:.*?COMMENT\s+\'(?P<comment>[^\']*)\')?',
#                 line,
#                 re.IGNORECASE
#             )
#
#             if col_match:
#                 name = col_match.group('name')
#                 field_type = col_match.group('type').upper()
#                 length = int(col_match.group('length')) if col_match.group('length') else None
#                 scale = int(col_match.group('scale')) if col_match.group('scale') else None
#                 nullable = col_match.group('null') != 'NOT NULL'
#                 comment = col_match.group('comment') or ''
#                 is_primary = name in primary_keys
#
#                 fields.append({
#                     "name": name,
#                     "type": field_type,
#                     "length": length,
#                     "decimal_length": scale,
#                     "nullable": nullable,
#                     "is_primary_key": is_primary,
#                     "comment": comment
#                 })
#
#
#             # ✅ 解析外键（支持反引号）
#             elif 'FOREIGN KEY' in line.upper():
#                 match = re.search(
#                     r'FOREIGN KEY\s*\(`?(\w+)`?\)\s+REFERENCES\s+`?(\w+)`?\s*\(`?(\w+)`?\)',
#                     line,
#                     re.IGNORECASE
#                 )
#                 if match:
#                     from_col, to_table, to_col = match.groups()
#                     fks.append((table_name, from_col, to_table, to_col))
#
#         return table_name, fields, fks
#
#     async def process(self, sql_url: str, database: str) -> Dict:
#         """完整处理流程"""
#         sql_path = await self.download_sql_file(sql_url)
#         # tables, relations = self.parse_sql_relations(sql_path)
#         # cypher = self.generate_cypher(tables, relations)
#         # import_result = self.import_to_neo4j(database, cypher)
#
#         sql_text = self.load_sql_file(sql_path)
#         table_blocks = self.extract_table_blocks(sql_text)
#
#         tables = {}
#         foreign_keys = []
#
#         for block in table_blocks:
#             table_name, fields, fks = self.parse_table_structure(block)
#             tables[table_name] = fields
#             for fk in fks:
#                 foreign_keys.append((*fk,))
#
#         self.write_to_neo4j(tables, foreign_keys,database)
#
#         # 清理临时文件
#         try:
#             shutil.rmtree(os.path.dirname(sql_path))
#             logger.info(f"临时文件已清理: {os.path.dirname(sql_path)}")
#         except Exception as e:
#             logger.warning(f"临时文件清理失败: {str(e)}")
#
#
#
#         return {
#             "tables": {"count": len(tables), "list": tables},
#             "relations": {"count": len(foreign_keys), "list": foreign_keys},
#             "neo4j_import": "success"
#         }