import os
import re
import uuid
import datetime
import shutil
import zipfile

import aiohttp
import yaml
import hashlib

# from arango.client import ArangoClient
from fastapi import FastAPI, HTTPException, Body
from pydantic import BaseModel
from typing import Dict, List, Tuple, Optional
import logging
from arango import ArangoClient
from arango.exceptions import ArangoError
from arango.database import StandardDatabase
from arango.graph import Graph as ArangoGraph
from starlette.middleware.cors import CORSMiddleware

from app.config import settings

# Logging configuration: timestamp - level - message, INFO and above.
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')
logger = logging.getLogger(__name__)

# Initialize the FastAPI application (title/description appear in the OpenAPI docs).
app = FastAPI(title="ER图生成接口服务", description="解析SQL并导入ArangoDB（含完整字段属性）")

# NOTE(review): wildcard origins combined with allow_credentials=True is very
# permissive — confirm this CORS policy is intended outside local development.
app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)
# Request/response models
class YAMLERGenerateRequest(BaseModel):
    # Download URL of the YAML/ZIP file to import (may be relative to the Dify prefix).
    file_url: str
    graph_name: Optional[str] = "er_graph"  # target graph name in ArangoDB
    file_name: str  # original file name; its .yaml/.zip suffix guides handling


class YAMLERGenerateResponse(BaseModel):
    """Uniform response envelope: status flag, optional error message, optional payload."""
    status: str = "success"
    message: Optional[str] = None
    data: Optional[Dict] = None


class YamlToArangoDBService:
    """Download an ER-model YAML file (or a ZIP of YAML files), parse it, and
    import the tables/columns/relationships into ArangoDB as a named graph.

    Vertex collections: ``tables``, ``columns``.
    Edge collections: ``has_column_edges`` (table→column),
    ``foreign_key_edges`` (column→column), ``refers_to_edges`` (table→table).
    One ArangoDB database is created per source database name, prefixed with
    ``db_structure_``.
    """

    def __init__(self):
        # Make sure the scratch directory for downloads exists up front.
        os.makedirs(settings.temp_dir_root, exist_ok=True)
        self.client = None  # ArangoClient, created lazily on first use
        self.db = None  # last database handle returned by _get_or_create_db

    def _init_arango_client(self):
        """Create the ArangoDB client once; raise HTTP 500 if it cannot be built."""
        if not self.client:
            try:
                self.client = ArangoClient(hosts=settings.arango_host)
                logger.info("ArangoDB客户端初始化成功")
            except Exception as e:
                logger.error(f"ArangoDB客户端初始化失败: {str(e)}")
                raise HTTPException(status_code=500, detail=f"无法连接到ArangoDB: {str(e)}")

    def _get_or_create_db(self, db_name: str) -> "StandardDatabase":
        """Return a handle to *db_name*, creating the database first if needed."""
        self._init_arango_client()

        sys_db = self.client.db('_system', username=settings.arango_user, password=settings.arango_password)

        # Create the database on first use.
        if not sys_db.has_database(db_name):
            sys_db.create_database(db_name)
            logger.info(f"创建新数据库: {db_name}")

        # Connect to the target database.
        self.db = self.client.db(db_name, username=settings.arango_user, password=settings.arango_password)
        logger.info(f"已连接到数据库: {db_name}")

        return self.db

    async def download_file(self, url: str, file_name: str) -> str:
        """Download *url* into a fresh temp directory and return the local path.

        *file_name* (the caller-supplied original name) decides the extension
        when the URL's last path segment does not carry a recognizable one.
        Raises HTTP 400 on a non-200 response and HTTP 500 on other failures.
        """
        # Prepend the Dify prefix only for relative URLs that are not already prefixed.
        if not url.startswith(settings.dify_prefix) and settings.dify_prefix and not url.startswith("http"):
            full_url = settings.dify_prefix.rstrip("/") + "/" + url.lstrip("/")
        else:
            full_url = url

        # Unique per-request directory so concurrent downloads never collide.
        timestamp = datetime.datetime.now().strftime("%Y%m%d_%H%M%S")
        unique_id = uuid.uuid4().hex[:8]
        temp_dir = os.path.join(settings.temp_dir_root, f"tmp_{timestamp}_{unique_id}")
        os.makedirs(temp_dir, exist_ok=True)

        # Derive the local file name from the URL; fall back to a generated name
        # carrying the extension implied by file_name.
        # Fix: '.yml' is now honored as well — previously a '.yml' file_name was
        # never recognized, leaving an extensionless file that process() rejected.
        filename = url.split("/")[-1]
        if not filename.lower().endswith(('.yaml', '.yml')) and file_name.lower().endswith(('.yaml', '.yml')):
            filename = f"er_import_{unique_id}.yaml"
        elif not filename.lower().endswith('.zip') and file_name.lower().endswith('.zip'):
            filename = f"er_import_{unique_id}.zip"

        file_path = os.path.join(temp_dir, filename)

        try:
            async with aiohttp.ClientSession() as session:
                async with session.get(full_url) as resp:
                    if resp.status != 200:
                        logger.error(f"文件下载失败，URL: {full_url}，状态码: {resp.status}")
                        raise HTTPException(status_code=400, detail=f"文件下载失败，状态码: {resp.status}")
                    # Stream to disk in 1 MiB chunks to keep memory bounded.
                    with open(file_path, "wb") as f:
                        while True:
                            chunk = await resp.content.read(1024 * 1024)
                            if not chunk:
                                break
                            f.write(chunk)
            logger.info(f"文件下载成功，路径: {file_path}")
            return file_path
        except HTTPException:
            # Fix: keep the 400 above from being swallowed and rewrapped as a 500.
            raise
        except Exception as e:
            logger.error(f"下载过程异常: {str(e)}")
            raise HTTPException(status_code=500, detail=f"文件下载失败: {str(e)}")

    def extract_zip(self, zip_path: str) -> List[str]:
        """Unzip *zip_path* and return the paths of every extracted YAML/YML member.

        Raises HTTP 400 for a corrupt archive, HTTP 500 for other failures.
        """
        yaml_files = []
        extract_dir = os.path.join(os.path.dirname(zip_path), "extracted")
        os.makedirs(extract_dir, exist_ok=True)

        try:
            with zipfile.ZipFile(zip_path, 'r') as zip_ref:
                # Extract only YAML/YML members; everything else is ignored.
                # ZipFile.extract sanitizes absolute paths and '..' components.
                for file in zip_ref.namelist():
                    if file.lower().endswith(('.yaml', '.yml')):
                        zip_ref.extract(file, extract_dir)
                        extracted_path = os.path.join(extract_dir, file)
                        yaml_files.append(extracted_path)
                        logger.info(f"提取文件: {extracted_path}")

            if not yaml_files:
                logger.warning(f"ZIP文件中未找到YAML文件: {zip_path}")

            return yaml_files
        except zipfile.BadZipFile as e:
            logger.error(f"ZIP文件损坏: {str(e)}")
            raise HTTPException(status_code=400, detail=f"ZIP文件损坏: {str(e)}")
        except Exception as e:
            logger.error(f"解压ZIP文件失败: {str(e)}")
            raise HTTPException(status_code=500, detail=f"解压ZIP文件失败: {str(e)}")

    def parse_type(self, type_str):
        """Parse a SQL type string such as ``VARCHAR(50)`` or ``DECIMAL(17,2)``.

        Returns a dict with ``base_type`` (uppercased when parameters are
        present), ``length``, ``precision`` (the second parameter, i.e. scale
        for DECIMAL) and the original string as ``full_type``.  Non-numeric
        parameters yield None.
        """
        base_type = type_str
        length = None
        precision = None

        if '(' in type_str and ')' in type_str:
            base_type = type_str.split('(')[0].strip().upper()
            params = type_str.split('(')[1].split(')')[0]

            if ',' in params:
                # Fix: strip whitespace so "DECIMAL(17, 2)" parses its scale too
                # (' 2'.isdigit() is False, which silently dropped the precision).
                parts = [p.strip() for p in params.split(',')]
                length = int(parts[0]) if parts[0].isdigit() else None
                precision = int(parts[1]) if len(parts) > 1 and parts[1].isdigit() else None
            else:
                stripped = params.strip()
                length = int(stripped) if stripped.isdigit() else None

        return {
            "base_type": base_type,
            "length": length,
            "precision": precision,
            "full_type": type_str
        }

    def _generate_key(self, *parts):
        """Build an ArangoDB document _key from name parts.

        Two parts → ``db:table``; three → ``db:table:col``; any other arity is
        joined with underscores (future-proofing).  ':' is a legal _key
        character in ArangoDB.
        """
        if len(parts) == 2:
            return f"{parts[0]}:{parts[1]}"
        elif len(parts) == 3:
            return f"{parts[0]}:{parts[1]}:{parts[2]}"
        else:
            return "_".join(parts)

    def create_or_update_graph(self, db: "StandardDatabase", graph_name: str) -> "ArangoGraph":
        """Create the named graph, or refresh its edge definitions if it exists."""
        # Edge definitions tying the vertex collections together.
        edge_definitions = [
            {
                "edge_collection": "has_column_edges",
                "from_vertex_collections": ["tables"],
                "to_vertex_collections": ["columns"]
            },
            {
                "edge_collection": "foreign_key_edges",
                "from_vertex_collections": ["columns"],
                "to_vertex_collections": ["columns"]
            },
            {
                "edge_collection": "refers_to_edges",
                "from_vertex_collections": ["tables"],
                "to_vertex_collections": ["tables"]
            }
        ]

        if db.has_graph(graph_name):
            graph = db.graph(graph_name)
            for definition in edge_definitions:
                # Make sure the backing edge collection exists before touching
                # the graph definition.
                if not db.has_collection(definition["edge_collection"]):
                    db.create_collection(definition["edge_collection"], edge=True)
                    logger.info(f"创建新的边集合: {definition['edge_collection']}")

                # Fix: replace_edge_definition raises when the definition is not
                # yet part of the graph (e.g. an edge collection introduced in a
                # later version) — create it in that case instead.
                if graph.has_edge_definition(definition["edge_collection"]):
                    graph.replace_edge_definition(
                        edge_collection=definition["edge_collection"],
                        from_vertex_collections=definition["from_vertex_collections"],
                        to_vertex_collections=definition["to_vertex_collections"]
                    )
                else:
                    graph.create_edge_definition(
                        edge_collection=definition["edge_collection"],
                        from_vertex_collections=definition["from_vertex_collections"],
                        to_vertex_collections=definition["to_vertex_collections"]
                    )
            logger.info(f"已更新图: {graph_name}")
        else:
            graph = db.create_graph(
                graph_name,
                edge_definitions=edge_definitions
            )
            logger.info(f"已创建新图: {graph_name}")

        return graph

    def import_to_arango(self, tables, relationships, graph_name: str):
        """Upsert tables, columns and edges, one ArangoDB database per source
        database name.  Cross-database relationships are logged and skipped.
        """
        # Group tables by their (path-stripped) database name.
        db_tables = {}
        for table in tables:
            if 'database' not in table:
                raise HTTPException(status_code=400, detail=f"表 {table['table']} 缺少database属性")
            result = table['database'].rsplit('/', 1)[-1]
            db_name = f"db_structure_{result}"
            db_tables.setdefault(db_name, []).append(table)

        # Group relationships the same way; cross-database ones are dropped.
        db_relationships = {}
        for rel in relationships:
            if rel['source_database'] == rel['target_database']:
                db_name = f"db_structure_{rel['source_database']}"
                db_relationships.setdefault(db_name, []).append(rel)
            else:
                logger.warning(
                    f"跨数据库关系被忽略: {rel['source_database']}.{rel['source_table']}.{rel['source_column']} → "
                    f"{rel['target_database']}.{rel['target_table']}.{rel['target_column']}"
                )

        # Process each target database independently.
        for db_name, db_tables_list in db_tables.items():
            logger.info(f"处理数据库: {db_name}, 包含 {len(db_tables_list)} 个表")

            db = self._get_or_create_db(db_name)

            # Ensure vertex collections exist.
            for col in ["tables", "columns"]:
                if not db.has_collection(col):
                    db.create_collection(col)
                    logger.info(f"创建顶点集合: {col}")

            # Ensure edge collections exist.
            for col in ["has_column_edges", "foreign_key_edges", "refers_to_edges"]:
                if not db.has_collection(col):
                    db.create_collection(col, edge=True)
                    logger.info(f"创建边集合: {col}")

            # Create or refresh the graph definition.
            self.create_or_update_graph(db, graph_name)

            table_keys = {}   # (database, table) -> tables/_key
            column_keys = {}  # (database, table, column) -> columns/_key

            # Upsert table vertices (overwrite=True keeps re-imports idempotent).
            for table in db_tables_list:
                database_name = table['database'].rsplit('/', 1)[-1]
                table_key = self._generate_key(database_name, table['table'])
                table_doc = {
                    "_key": table_key,
                    "name": table['table'],
                    "database": database_name,
                    "domain": table.get('domain'),
                    "description": table.get('description'),
                    "comment": table.get('comment'),
                    "type": "table"
                }
                db.collection("tables").insert(table_doc, overwrite=True)
                table_keys[(database_name, table['table'])] = table_key

            # Upsert column vertices and table→column edges.
            for table in db_tables_list:
                database_name = table['database'].rsplit('/', 1)[-1]
                for col in table['columns']:
                    type_info = self.parse_type(col['type'])
                    col_key = self._generate_key(database_name, table['table'], col['name'])

                    col_doc = {
                        "_key": col_key,
                        "name": col['name'],
                        "database": database_name,
                        "table": table['table'],
                        "base_type": type_info['base_type'],
                        "full_type": type_info['full_type'],
                        "length": type_info['length'],
                        "precision": type_info['precision'],
                        "nullable": col['nullable'],
                        "comment": col.get('comment', ''),
                        "primary_key": col.get('primary_key', False),
                        "foreign_key": col.get('foreign_key', False),
                        "type": "column"
                    }

                    # Optional free-form tags are merged into the document.
                    if 'tags' in col:
                        col_doc.update(col['tags'])

                    db.collection("columns").insert(col_doc, overwrite=True)
                    column_keys[(database_name, table['table'], col['name'])] = col_key

                    # Table→column edge: insert only if absent, to avoid duplicates.
                    edge_from = f"tables/{table_keys[(database_name, table['table'])]}"
                    edge_to = f"columns/{col_key}"
                    if not db.collection("has_column_edges").find({"_from": edge_from, "_to": edge_to}).count():
                        db.collection("has_column_edges").insert({
                            "_from": edge_from,
                            "_to": edge_to,
                            "relation": "HAS_COLUMN"
                        })

            # Foreign-key (column→column) and reference (table→table) edges.
            if db_name in db_relationships:
                db_rels = db_relationships[db_name]
                logger.info(f"处理 {len(db_rels)} 个关系")

                for rel in db_rels:
                    source_key = column_keys.get((rel['source_database'], rel['source_table'], rel['source_column']))
                    target_key = column_keys.get((rel['target_database'], rel['target_table'], rel['target_column']))

                    if source_key and target_key:
                        # Column-level foreign-key edge.
                        edge_from = f"columns/{source_key}"
                        edge_to = f"columns/{target_key}"
                        if not db.collection("foreign_key_edges").find({"_from": edge_from, "_to": edge_to}).count():
                            db.collection("foreign_key_edges").insert({
                                "_from": edge_from,
                                "_to": edge_to,
                                "relation": "FOREIGN_KEY_TO",
                                **rel
                            })

                        # Table-level reference edge (first FK between the two
                        # tables wins; later ones keep the existing edge).
                        source_table_key = table_keys[(rel['source_database'], rel['source_table'])]
                        target_table_key = table_keys[(rel['target_database'], rel['target_table'])]
                        if not db.collection("refers_to_edges").find({
                            "_from": f"tables/{source_table_key}",
                            "_to": f"tables/{target_table_key}"
                        }).count():
                            db.collection("refers_to_edges").insert({
                                "_from": f"tables/{source_table_key}",
                                "_to": f"tables/{target_table_key}",
                                "relation": "REFERS_TO",
                                "via_columns": f"{rel['source_column']} → {rel['target_column']}"
                            })
                    else:
                        # The referenced column was not seen in this batch.
                        logger.warning(
                            f"外键引用失败：{rel['source_database']}.{rel['source_table']}.{rel['source_column']} → "
                            f"{rel['target_database']}.{rel['target_table']}.{rel['target_column']}"
                        )
            else:
                logger.info(f"数据库 {db_name} 没有关系需要处理")

    def extract_foreign_keys(self, tables):
        """Collect explicit foreign-key declarations from column definitions.

        A column participates when it has ``foreign_key: true`` and a
        ``references`` entry — either a dict or a list of dicts with ``table``
        and ``column`` keys, plus an optional ``database`` that defaults to
        the (path-stripped) source database.
        """
        relationships = []

        for table in tables:
            if 'database' not in table:
                raise HTTPException(status_code=400, detail=f"表 {table['table']} 缺少database属性")

            # Strip any path prefix; constant for all columns of this table.
            source_database = table['database'].rsplit('/', 1)[-1]

            for col in table['columns']:
                if col.get('foreign_key') is True and 'references' in col:
                    refs = col['references']
                    # Normalize a single dict to a one-element list.
                    if isinstance(refs, dict):
                        refs = [refs]

                    for ref in refs:
                        if isinstance(ref, dict) and 'table' in ref and 'column' in ref:
                            # Target database defaults to the source database.
                            target_database = ref.get('database', source_database)

                            relationships.append({
                                "source_database": source_database,
                                "source_table": table['table'],
                                "source_column": col['name'],
                                "target_database": target_database,
                                "target_table": ref['table'],
                                "target_column": ref['column']
                            })
        return relationships

    async def process(self, file_url: str, graph_name: str, file_name: str) -> Dict:
        """Full pipeline: download → (unzip) → parse YAML → extract FKs → import.

        Returns a summary dict; raises HTTPException on any unrecoverable step.
        Temporary files are always cleaned up, even on failure.
        """
        file_path = await self.download_file(file_url, file_name)
        temp_dir = os.path.dirname(file_path)

        try:
            # Dispatch on file type.
            if file_path.lower().endswith('.zip'):
                logger.info(f"处理ZIP文件: {file_path}")
                yaml_files = self.extract_zip(file_path)
            elif file_path.lower().endswith(('.yaml', '.yml')):
                logger.info(f"处理YAML文件: {file_path}")
                yaml_files = [file_path]
            else:
                logger.error(f"不支持的文件格式: {file_path}")
                raise HTTPException(status_code=400, detail="仅支持YAML/YML或ZIP格式的文件")

            # Parse every YAML file.  Two layouts are accepted:
            #   1. a list of table objects;  2. a single table object.
            all_tables = []
            for yaml_file in yaml_files:
                try:
                    with open(yaml_file, 'r', encoding='utf-8') as file:
                        content = yaml.safe_load(file)

                        if isinstance(content, list):
                            all_tables.extend(content)
                            logger.info(f"解析文件成功(多表格式): {yaml_file}, 表数量: {len(content)}")
                        elif isinstance(content, dict):
                            all_tables.append(content)
                            logger.info(f"解析文件成功(单表格式): {yaml_file}, 添加1个表")
                        else:
                            logger.warning(f"文件内容格式错误，应为列表或字典: {yaml_file}")
                            continue
                except Exception as e:
                    # Best effort: skip unparseable files but keep going.
                    logger.error(f"解析YAML文件失败: {yaml_file}, 错误: {str(e)}")

            if not all_tables:
                logger.error("未找到有效的表数据")
                raise HTTPException(status_code=400, detail="未找到有效的表数据")

            # Every table must declare which database it belongs to.
            for table in all_tables:
                if 'database' not in table:
                    raise HTTPException(status_code=400, detail=f"表 {table.get('table', '未知')} 缺少database属性")

            # Extract relationships and import into ArangoDB.
            relationships = self.extract_foreign_keys(all_tables)
            self.import_to_arango(all_tables, relationships, graph_name)
            logger.info(f"导入完成：共 {len(all_tables)} 个表，{len(relationships)} 个外键字段关系")

            # Report the database name derived from the first table.
            # Fix: apply the same rsplit path-strip used by import_to_arango so
            # the reported name matches the database actually created.
            database_name = all_tables[0]['database'].rsplit('/', 1)[-1]
            db_name = f"db_structure_{database_name}"

            return {
                "tables": {"count": len(all_tables), "list": all_tables},
                "relations": {"count": len(relationships), "list": relationships},
                "arango_import": "success",
                "database": db_name,
                "graph": graph_name
            }
        finally:
            # Fix: cleanup now runs on failure too — the original leaked the
            # temp directory whenever parsing or import raised.
            try:
                shutil.rmtree(temp_dir)
                logger.info(f"临时文件已清理: {temp_dir}")
            except Exception as e:
                logger.warning(f"临时文件清理失败: {str(e)}")

# # FastAPI端点  local test
# @app.post("/rag/generate/er_from_yaml", response_model=YAMLERGenerateResponse)
# async def generate_er_from_yaml(request_data: YAMLERGenerateRequest = Body(...)):
#     service = YamlToArangoDBService()
#     try:
#         result = await service.process(
#             file_url=request_data.file_url,
#             file_name=request_data.file_name,
#             graph_name="table_er_graph"
#         )
#         return YAMLERGenerateResponse(data=result)
#     except Exception as e:
#         logger.error(f"ER图生成失败: {str(e)}", exc_info=True)
#         return YAMLERGenerateResponse(
#             status="error",
#             message=str(e)
#         )
#
# if __name__ == "__main__":
#     import uvicorn
#
#     uvicorn.run(app, host="localhost", port=8080)