import asyncio
import datetime
import json
import logging
import os
import re
import shutil
import uuid
import zipfile
from contextlib import asynccontextmanager
from pathlib import Path
from typing import Annotated, Optional, List

import aiohttp
import aiomysql
import jwt
from fastapi import (
    FastAPI,
    UploadFile,
    File,
    Form,
    HTTPException,
    BackgroundTasks,
    Query,
    Body,
)
from fastapi import Request
from fastapi.middleware.cors import CORSMiddleware
from fastapi.responses import PlainTextResponse, JSONResponse
from jinja2 import Template
from langgraph.graph import StateGraph, END
from pydantic import BaseModel

from app.config import settings
from app.convert_to_yaml.main import ProcessingResponse, process_sql_files, SQLProcessingRequest, tasks
from app.db_group.db_group_dao import DbGroupDAO, DeleteRequest, ResponseModel, DbGroupItem

from app.gen_db_er.import_sql_to_arangodb import ImportResponse, SQLDataImporter, ImportRequest
from app.gen_er_from_yaml.import_yaml_to_arango import YamlToArangoDBService, YAMLERGenerateResponse, \
    YAMLERGenerateRequest
from app.import_sql_to_graph.sql_struct_to_arango import SQLImportResponse, SQLToArangoDBService, SQLImportRequest

from app.migrate_neo4j.default import DefaultRelsToGraphMigrator
from app.upload_file_to_dify.upload_file_to_dify import UploadResponse, DifyUploadService, UploadRequest
from app.utils.email_utils import send_email, send_email_to_techtanium
from app.workflow import (
    WorkflowState,
    extract_archives,
    convert_to_markdown,
    optimize_documents,
    create_result_zip,
    send_result_email,
    custom_optimize_documents,
    insert_flow,
)
from app.zip_to_neo4j.zip_to_neo4j import ZipToNeo4jService, ZipToNeo4jResponse, ZipToNeo4jRequest

# Root logging configuration for the whole service.
logging.basicConfig(
    level=logging.INFO, format="%(asctime)s - %(levelname)s - %(message)s"
)
logger = logging.getLogger(__name__)

# ======================
# LangGraph workflow
# ======================
# Linear document-conversion pipeline:
#   extract archives -> convert to markdown -> optimize -> custom optimize
#   -> zip result -> e-mail result -> record the approval flow row.
workflow = StateGraph(WorkflowState)
workflow.add_node("extract_archives", extract_archives)
workflow.add_node("convert_to_markdown", convert_to_markdown)
workflow.add_node("optimize_documents", optimize_documents)
workflow.add_node("custom_optimize_documents", custom_optimize_documents)
workflow.add_node("create_result_zip", create_result_zip)
workflow.add_node("send_result_email", send_result_email)
workflow.add_node("insert_flow", insert_flow)
workflow.set_entry_point("extract_archives")
workflow.add_edge("extract_archives", "convert_to_markdown")
workflow.add_edge("convert_to_markdown", "optimize_documents")
workflow.add_edge("optimize_documents", "custom_optimize_documents")
workflow.add_edge("custom_optimize_documents", "create_result_zip")
workflow.add_edge("create_result_zip", "send_result_email")
workflow.add_edge("send_result_email", "insert_flow")
workflow.add_edge("insert_flow", END)
convert_workflow = workflow.compile()

# Shared aiomysql pool; created/destroyed by the FastAPI lifespan below.
mysql_pool = None


def get_mysql_pool():
    """Return the shared aiomysql pool created by the app lifespan (None before startup)."""
    # Reading a module-level name needs no `global` declaration.
    return mysql_pool


@asynccontextmanager
async def lifespan(app: FastAPI):
    """FastAPI lifespan hook: build the shared MySQL pool on startup and
    close it cleanly on shutdown.

    Raises on startup if the pool cannot be created, aborting app start.
    """
    global mysql_pool
    try:
        mysql_pool = await aiomysql.create_pool(
            host=settings.mysql_host,
            port=settings.mysql_port,
            user=settings.mysql_user,
            password=settings.mysql_password,
            db=settings.mysql_db,
            autocommit=True,
        )
        logger.info("Lifespan: MySQL pool created.")
    except Exception as e:
        logger.error(f"Lifespan: Failed to create MySQL pool: {e}")
        raise
    # The application serves requests while suspended at this yield.
    yield
    logger.info("Lifespan: closing MySQL pool...")
    if mysql_pool:
        mysql_pool.close()
        await mysql_pool.wait_closed()
        mysql_pool = None
    logger.info("Lifespan: MySQL pool closed.")


app = FastAPI(lifespan=lifespan)

# Enable CORS for all origins, methods, and headers
app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)


def get_app():
    """Return the module-level FastAPI application instance."""
    return app


@app.post("/rag/doc/convert")
async def convert_document(
        ownerName: Annotated[str, Form(alias="ownerName")],
        converterEmail: Annotated[str, Form(alias="converterEmail")],
        knowledgeBaseName: Annotated[str, Form(alias="knowledgeBaseName")],
        file: Annotated[UploadFile, File(...)],
        language: Annotated[str, Form()],
        background_tasks: BackgroundTasks,
):
    """Accept an uploaded document and run the conversion workflow in the background.

    Validates the form fields, resolves the owner's e-mail from MySQL, issues
    a JWT carrying the conversion context, schedules `run_workflow`, and
    returns immediately with a plain-text acknowledgement.
    """
    # Form-field validation.
    if not ownerName or not converterEmail or not knowledgeBaseName or not language:
        raise HTTPException(
            status_code=400,
            detail="ownerName, converterEmail, knowledgeBaseName, language are required.",
        )

    # Resolve ownerEmail from ownerName (status '0' = active).
    pool = get_mysql_pool()
    if pool is None:
        raise HTTPException(status_code=503, detail="MySQL pool is not available.")
    async with pool.acquire() as conn:
        async with conn.cursor() as cur:
            # BUG FIX: query parameters must be a 1-tuple; "(ownerName)" is
            # just a parenthesized string and only worked by a pymysql
            # escaping accident.
            await cur.execute(
                "SELECT owner_email FROM t_rag_owner WHERE owner_name=%s AND status='0'",
                (ownerName,),
            )
            owner_row = await cur.fetchone()
            if not owner_row:
                raise HTTPException(
                    status_code=400, detail="ownerName not found or not valid."
                )
            ownerEmail = owner_row[0]
            # knowledgeBaseName is validated against Dify directly, so no
            # local t_knowledge_base lookup is performed here.

    # JWT embedding the conversion context; expires per settings.
    payload = {
        "ownerName": ownerName,
        "ownerEmail": ownerEmail,
        "converterEmail": converterEmail,
        "knowledgeBaseName": knowledgeBaseName,
        "language": language,
        "exp": datetime.datetime.now(datetime.timezone.utc)
               + datetime.timedelta(hours=settings.jwt_expiration_hours),
    }
    token = jwt.encode(payload, settings.jwt_secret_key, algorithm="HS256")

    # Buffer the upload in memory and hand it to the background workflow.
    content = await file.read()
    memory_file = {"filename": file.filename, "content": content}
    background_tasks.add_task(
        run_workflow,
        memory_file=memory_file,
        owner_name=ownerName,
        owner_email=ownerEmail,
        converter_email=converterEmail,
        knowledge_base_name=knowledgeBaseName,
        language=language,
        token=token,
        compiled_workflow=convert_workflow,
    )
    return PlainTextResponse(
        "program has receive files, we will convert this files and will send result to your email, please waiting"
    )


async def run_workflow(
        memory_file,
        owner_name,
        owner_email,
        converter_email,
        knowledge_base_name,
        language,
        token,
        compiled_workflow,
):
    """Persist the in-memory upload to a temp dir, publish it under nginx,
    and run the compiled LangGraph conversion workflow.

    On failure the converter is e-mailed and the temp dir is removed; on
    success the temp dir is intentionally left in place (original behavior
    preserved — presumably consumed by later steps; confirm before changing).
    """
    filename = str(memory_file["filename"])
    # Unique working directory per run: tmp_<timestamp>_<uuid8>.
    timestamp = datetime.datetime.now().strftime("%Y%m%d_%H%M%S")
    unique_id = uuid.uuid4().hex[:8]
    os.makedirs(settings.temp_dir_root, exist_ok=True)
    temp_dir = os.path.join(settings.temp_dir_root, f"tmp_{timestamp}_{unique_id}")
    os.makedirs(temp_dir, exist_ok=True)
    temp_path = Path(temp_dir)
    file_path = temp_path / filename
    with open(file_path, "wb") as f:
        f.write(memory_file["content"])
    # Copy the original under the nginx static dir so origin_url resolves.
    parent_name = file_path.parent.name
    target_dir = os.path.join(settings.nginx_static_dir, parent_name)
    os.makedirs(target_dir, exist_ok=True)
    target_nginx_path = os.path.join(target_dir, filename)
    shutil.copy(str(file_path), target_nginx_path)
    # BUG FIX: the original f-string reused double quotes inside the
    # replacement field ({memory_file["filename"]}), which is a SyntaxError
    # on Python < 3.12 (PEP 701); use the hoisted `filename` instead.
    origin_url = f"{settings.nginx_url_prefix}/{parent_name}/{filename}"
    initial_state = WorkflowState(
        mysql_pool=get_mysql_pool(),
        temp_path=temp_path,
        processed_files=[file_path],
        origin_url=origin_url,
        language=language,
        owner_name=owner_name,
        owner_email=owner_email,
        knowledge_base_name=knowledge_base_name,
        converter_email=converter_email,
        token=token,
    )
    try:
        await compiled_workflow.ainvoke(initial_state, {})
    except Exception as e:
        logger.error(f"run_workflow error: {e}", exc_info=True)
        body = Template(settings.converter_fail_email_template).render(
            errors=[{"error": f"System error: {str(e)}"}]
        )
        send_email(converter_email, body)
        # Clean up the working directory only on failure.
        shutil.rmtree(temp_path)


@app.get("/rag/owner")
async def get_rag_owner():
    """List active RAG owners (status '0') as [{ownerName, ownerEmail}, ...]."""
    try:
        pool = get_mysql_pool()
        if pool is None:
            raise HTTPException(status_code=503, detail="MySQL pool is not available.")
        async with pool.acquire() as conn:
            async with conn.cursor() as cur:
                await cur.execute(
                    "SELECT owner_name, owner_email FROM t_rag_owner WHERE status = '0'"
                )
                rows = await cur.fetchall()
        owners = [{"ownerName": row[0], "ownerEmail": row[1]} for row in rows]
        return JSONResponse(content=owners)
    except HTTPException:
        # BUG FIX: the 503 above used to be swallowed by the generic handler
        # and re-reported as a 500; let FastAPI deliver it unchanged.
        raise
    except Exception as e:
        logger.error(f"Error fetching owners: {e}", exc_info=True)
        return JSONResponse(content={"error": str(e)}, status_code=500)


@app.get("/rag/knowledge")
async def get_rag_knowledge():
    """List active knowledge bases (status '0') as [{knowledgeBaseName}, ...]."""
    try:
        pool = get_mysql_pool()
        if pool is None:
            raise HTTPException(status_code=503, detail="MySQL pool is not available.")
        async with pool.acquire() as conn:
            async with conn.cursor() as cur:
                await cur.execute(
                    "SELECT knowledge_base_name FROM t_knowledge_base WHERE status = '0'"
                )
                rows = await cur.fetchall()
        knowledge = [{"knowledgeBaseName": row[0]} for row in rows]
        return JSONResponse(content=knowledge)
    except HTTPException:
        # BUG FIX: the 503 above used to be swallowed by the generic handler
        # and re-reported as a 500; let FastAPI deliver it unchanged.
        raise
    except Exception as e:
        logger.error(f"Error fetching knowledge: {e}", exc_info=True)
        return JSONResponse(content={"error": str(e)}, status_code=500)


@app.post("/rag/approve")
async def approve_document(request: Request, token: str = Query(...)):
    """Approve or reject a pending document identified by its JWT token.

    Body: {"status": "1"|"2", "document_type": ...} — "1" approves, "2"
    rejects. Updates rag.t_approve_flow and (best-effort) e-mails the
    record's creator with the outcome.
    """
    try:
        payload = await request.json()
        status = payload.get("status")
        document_type = payload.get("document_type")
        # Map request status to the DB column value; anything else is invalid.
        if status == "1":
            db_status = 1
        elif status == "2":
            db_status = 2
        else:
            raise HTTPException(status_code=400, detail="Invalid status")
        # 1. Parse and validate JWT token
        # NOTE: `payload` is re-bound here from the request body to the
        # decoded JWT claims.
        try:
            payload = jwt.decode(token, settings.jwt_secret_key, algorithms=["HS256"])
            owner_name = payload.get("ownerName")
            knowledge_base_name = payload.get("knowledgeBaseName")
        except jwt.ExpiredSignatureError:
            raise HTTPException(status_code=400, detail="Token has expired")
        except jwt.InvalidTokenError:
            raise HTTPException(status_code=400, detail="Invalid token")

        # 2. Check database record existence and status
        pool = get_mysql_pool()
        if pool is None:
            raise HTTPException(status_code=503, detail="MySQL pool is not available.")

        async with pool.acquire() as conn:
            async with conn.cursor() as cur:
                # Check if record exists and status is valid
                await cur.execute(
                    "SELECT id, status, creator_email, rag_owner FROM rag.t_approve_flow WHERE token = %s",
                    (token,),
                )
                result = await cur.fetchone()

                if not result:
                    raise HTTPException(status_code=404, detail="Record not found")

                record_id, current_status, creator_email, rag_owner = result

                if (
                        current_status != "0"
                ):  # Only allow approval if status is '0' (created)
                    raise HTTPException(
                        status_code=400,
                        detail=f"Record status is {current_status}, cannot approve",
                    )

                # 3. Update database record status to approved/rejected
                await cur.execute(
                    "UPDATE rag.t_approve_flow SET status = %s, import_file_type = %s, update_at = NOW() WHERE id = %s",
                    (db_status, document_type,record_id),
                )

        # Send notification email to creator_email; a mail failure is logged
        # but does not fail the approval itself.
        try:
            body = Template(settings.approve_email_template).render(
                ownerName=rag_owner, result=status
            )
            send_email(creator_email, body)
        except Exception as e:
            logger.error(
                f"Failed to send approval notification email: {e}", exc_info=True
            )

        return JSONResponse(
            content={
                "message": (
                    "Document approved successfully"
                    if db_status == 1
                    else "Document rejected successfully"
                ),
                "ownerName": owner_name,
                "knowledgeBaseName": knowledge_base_name,
            }
        )

    except HTTPException:
        # Re-raise HTTP errors untouched; only unexpected failures become 500s.
        raise
    except Exception as e:
        logger.error(f"Error in approve_document: {e}", exc_info=True)
        raise HTTPException(status_code=500, detail="Internal server error")


@app.get("/rag/approve")
async def get_approve_flow(token: str):
    """Fetch one approval-flow record by its JWT token.

    Returns the row as a JSON object with datetime columns rendered as
    "YYYY-MM-DD HH:MM:SS" strings; 404 when no record matches the token.
    """
    try:
        pool = get_mysql_pool()
        if pool is None:
            raise HTTPException(status_code=503, detail="MySQL pool is not available.")
        async with pool.acquire() as conn:
            async with conn.cursor() as cur:
                await cur.execute(
                    """
                    SELECT id, token, creator_email, origin_url, file_url, rag_owner, rag_owner_email, knowledge_base_id, status, comment, 
                           create_by, create_at, update_at, updated_by,import_file_type,file_parse_info
                    FROM rag.t_approve_flow WHERE token = %s
                    """,
                    (token,),
                )
                row = await cur.fetchone()
        if not row:
            raise HTTPException(status_code=404, detail="Record not found")
        # Key order must mirror the SELECT column list above.
        keys = [
            "id",
            "token",
            "creator_email",
            "origin_url",
            "file_url",
            "rag_owner",
            "rag_owner_email",
            "knowledge_base_id",
            "status",
            "comment",
            "create_by",
            "create_at",
            "update_at",
            "updated_by",
            "import_file_type",
            "file_parse_info",
        ]
        result = dict(zip(keys, row))
        # Convert datetime fields to string for JSON serialization
        for k in ["create_at", "update_at"]:
            if result.get(k) is not None and hasattr(result[k], "isoformat"):
                result[k] = result[k].isoformat(sep=" ", timespec="seconds")
        return JSONResponse(content=result)
    except HTTPException:
        raise
    except Exception as e:
        logger.error(f"Error in get_approve_flow: {e}", exc_info=True)
        raise HTTPException(status_code=500, detail="Internal server error")
# Request model for /rag/send-approval-emails
class EmailRequest(BaseModel):
    """Payload describing an uploaded file that awaits owner approval."""

    # URL of the originally uploaded file (relative or absolute).
    origin_url: str
    # zip_url: str
    owner_name: str
    converter_email: str
    language: str
    # File category, e.g. "sql" triggers pre-analysis of the dump.
    file_type: str
    file_name: str

async def download_file(url: str, file_name: str) -> str:
    """Download *url* into a fresh temp directory and return the local path.

    URLs without a scheme that don't already carry ``settings.dify_prefix``
    are resolved against that prefix. Raises HTTPException(400) on a non-200
    response and HTTPException(500) on any other download failure.
    """
    # Prepend the Dify prefix only for relative URLs that don't already
    # carry it (behavior-equivalent rewrite of the original condition).
    if settings.dify_prefix and not url.startswith((settings.dify_prefix, "http")):
        full_url = settings.dify_prefix.rstrip("/") + "/" + url.lstrip("/")
    else:
        full_url = url

    # One unique temp directory per download: tmp_<timestamp>_<uuid8>.
    timestamp = datetime.datetime.now().strftime("%Y%m%d_%H%M%S")
    unique = uuid.uuid4().hex[:8]
    tempdir = os.path.join(settings.temp_dir_root, f"tmp_{timestamp}_{unique}")
    os.makedirs(tempdir, exist_ok=True)
    filename = file_name or full_url.split("/")[-1]
    path = os.path.join(tempdir, filename)
    try:
        async with aiohttp.ClientSession() as session:
            async with session.get(full_url) as resp:
                if resp.status != 200:
                    raise HTTPException(status_code=400, detail=f"下载失败，HTTP {resp.status}")
                with open(path, "wb") as f:
                    # Stream in 1 MiB chunks to bound memory usage.
                    while chunk := await resp.content.read(1024 * 1024):
                        f.write(chunk)
        logger.info(f"Downloaded file to {path}")
        return path
    except HTTPException:
        # BUG FIX: the 400 above used to be caught by the generic handler
        # below and re-wrapped as a 500; propagate it unchanged.
        raise
    except Exception as e:
        logger.error(f"download error: {e}")
        raise HTTPException(status_code=500, detail=f"下载失败: {e}")
@app.post("/rag/send-approval-emails")
async def send_success_emails(request: EmailRequest):
    """Download the uploaded file, e-mail owner and converter, and record a
    pending approval-flow row.

    For SQL uploads the dump is pre-analyzed so the approval record carries
    the parsed database info.
    """
    # BUG FIX: define up-front so the generic except handler below can no
    # longer hit a NameError when a failure occurs before the DB lookup.
    ownerEmail = None
    try:
        file_path = await download_file(request.origin_url, request.file_name)
        file_parse_info = ""
        if request.file_type == "sql":
            service = SQLDataImporter()
            database_info = await service.analyze_sql_file(file_path)
            file_parse_info = json.dumps(database_info, ensure_ascii=False)
        ownerName = request.owner_name
        converterEmail = request.converter_email
        language = request.language
        if not ownerName or not converterEmail or not language:
            raise HTTPException(
                status_code=400,
                detail="ownerName, converterEmail, language are required.",
            )

        # Resolve ownerEmail from ownerName (status '0' = active).
        pool = get_mysql_pool()
        if pool is None:
            raise HTTPException(status_code=503, detail="MySQL pool is not available.")
        async with pool.acquire() as conn:
            async with conn.cursor() as cur:
                # BUG FIX: query parameters must be a 1-tuple, not a
                # parenthesized string.
                await cur.execute(
                    "SELECT owner_email FROM rag.t_rag_owner WHERE owner_name=%s AND status='0'",
                    (ownerName,),
                )
                owner_row = await cur.fetchone()
                if not owner_row:
                    raise HTTPException(
                        status_code=400, detail="ownerName not found or not valid."
                    )
                ownerEmail = owner_row[0]

        # JWT carrying the approval context, embedded in the owner's e-mail link.
        payload = {
            "ownerName": request.owner_name,
            "ownerEmail": ownerEmail,
            "converterEmail": converterEmail,
            "knowledgeBaseName": request.file_type,
            "language": language,
            "exp": datetime.datetime.now(datetime.timezone.utc)
                   + datetime.timedelta(hours=settings.jwt_expiration_hours),
        }
        token = jwt.encode(payload, settings.jwt_secret_key, algorithm="HS256")
        logger.info(f"token: {token}")

        # Publish the file under the nginx static dir so e-mail links resolve.
        parent_name = Path(file_path).parent.name
        target_dir = os.path.join(settings.nginx_static_dir, parent_name)
        os.makedirs(target_dir, exist_ok=True)
        target_nginx_path = os.path.join(target_dir, request.file_name)
        shutil.copy(str(file_path), target_nginx_path)
        origin_url = f"{settings.nginx_url_prefix}/{parent_name}/{request.file_name}"

        # Detailed approval e-mail for the owner.
        owner_body = Template(settings.owner_email_template).render(
            origin_url=origin_url,
            owner_name=request.owner_name,
            converter_email=request.converter_email,
            language=request.language,
            token=token,
            file_type=request.file_type,
            file_name=request.file_name,
        )
        # Short success notice for the converter.
        converter_body = Template(settings.converter_success_email_template).render(
            ownerName=request.owner_name
        )
        logger.info(f"ownerEmail: {ownerEmail}")
        logger.info(f"converter_email: {request.converter_email}")
        # Route through the mail sender matching the recipient's domain.
        if ownerEmail.endswith("@gopomelo.com"):
            send_email(ownerEmail, owner_body)
            send_email(request.converter_email, converter_body)
        elif ownerEmail.endswith("@techtanium.com"):
            send_email_to_techtanium(ownerEmail, owner_body)
            send_email_to_techtanium(request.converter_email, converter_body)
        # Record the pending ("0") approval-flow row.
        async with pool.acquire() as conn:
            async with conn.cursor() as cur:
                await cur.execute(
                    """
                    INSERT INTO rag.t_approve_flow (token, creator_email, file_url, origin_url, rag_owner, rag_owner_email,
                                                knowledge_base_id, status, comment, create_by, create_at,import_file_type,file_parse_info)
                    VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, NOW(),%s, %s)
                    """,
                    (
                        token,
                        request.converter_email,
                        origin_url,
                        origin_url,
                        request.owner_name,
                        ownerEmail,
                        request.file_type,
                        "0",
                        "upload successfully",
                        request.owner_name,
                        request.file_type,
                        file_parse_info,
                    ),
                )
        return {
            "message": "Emails sent successfully",
            "recipients": {
                "owner": ownerEmail,
                "converter": request.converter_email
            }
        }

    except HTTPException:
        # BUG FIX: validation errors (400/503) used to be swallowed and
        # returned as a 200 "send email error" payload; surface them instead.
        raise
    except Exception as e:
        logger.error(f"Email sending failed: {str(e)}")
        return {
            "message": f"send email error: {e}",
            "recipients": {
                "owner": ownerEmail,
                "converter": request.converter_email
            }
        }


@app.post("/rag/sync/db")
async def download_document(
        url: Annotated[str, Form()],
        name: Annotated[str, Form()],
        # importKG: Annotated[str, Form()],
        background_tasks: BackgroundTasks,
):
    """Accept a .sql/.zip dump URL and import it into MySQL in the background."""
    # Only SQL dumps (optionally zipped) are accepted.
    if not name.endswith((".sql", ".zip")):
        raise HTTPException(status_code=400, detail="name must end with sql or zip.")

    # Defer the heavy lifting; reply immediately with 202 Accepted.
    background_tasks.add_task(sync_db_async_job, url, name)
    return JSONResponse(
        content={"message": "Task accepted, processing in background."},
        status_code=202,
    )
async def sync_db_async_job(url: str, name: str):
    """Background job: download a .sql or .zip dump and import it into MySQL.

    Each SQL file must target exactly one database via a USE statement;
    databases that already exist are skipped. Statements are executed inside
    one transaction per file and rolled back on the first error.
    """
    # 1. Download the file (zip or sql supported)
    file_path = await download_file(url, name)

    sql_files = []
    if file_path.lower().endswith(".zip"):
        # Unpack the archive next to the download
        extract_dir = os.path.splitext(file_path)[0]
        os.makedirs(extract_dir, exist_ok=True)
        with zipfile.ZipFile(file_path, "r") as zip_ref:
            zip_ref.extractall(extract_dir)
        # Collect every contained SQL file
        for root, _, files in os.walk(extract_dir):
            for f in files:
                if f.lower().endswith(".sql"):
                    sql_files.append(os.path.join(root, f))
        if not sql_files:
            raise Exception("ZIP 文件中未找到任何 SQL 文件")
        logger.info(f"Extracted SQL files: {sql_files}")
    elif file_path.lower().endswith(".sql"):
        sql_files.append(file_path)
    else:
        raise Exception("仅支持 .sql 或 .zip 格式的文件")

    pool = get_mysql_pool()
    if pool is None:
        raise Exception("MySQL pool is not available.")

    # Execute each SQL file in turn
    for sql_file in sql_files:
        logger.info(f"Processing SQL file: {sql_file}")
        with open(sql_file, "r", encoding="utf-8") as f:
            sql_content = f.read()

        # 2. Validate: the file must pin exactly one target database
        use_db_matches = re.findall(
            r"USE\s+`?([a-zA-Z0-9_]+)`?\s*;", sql_content, re.IGNORECASE
        )
        if not use_db_matches:
            raise Exception("SQL 文件中必须包含 USE 语句指定数据库，例如 USE dbname;。")
        if len(set(use_db_matches)) > 1:
            raise Exception(f"SQL 文件只能指定一个数据库，当前检测到多个: {set(use_db_matches)}")
        db_name = use_db_matches[0]

        async with pool.acquire() as conn:
            async with conn.cursor() as cur:
                await cur.execute("SHOW DATABASES LIKE %s", (db_name,))
                db_exist = await cur.fetchone()
                if db_exist:
                    logger.info(f"Database {db_name} already exists, skip import.")
                    continue  # skip databases that already exist

        # 3. Import the SQL statement-by-statement in one transaction.
        # NOTE(review): splitting on ";" is naive — it will break statements
        # containing literal semicolons (string data, triggers, procedures).
        sql_statements = [stmt.strip() for stmt in sql_content.split(";") if stmt.strip()]
        async with pool.acquire() as conn:
            try:
                await conn.begin()
                async with conn.cursor() as cur:
                    for stmt in sql_statements:
                        try:
                            await cur.execute(stmt)
                        except Exception as e:
                            logger.error(f"Error executing SQL: {stmt}\nError: {e}")
                            await conn.rollback()
                            raise Exception(f"Error executing SQL: {stmt}\nError: {e}")
                await conn.commit()
                logger.info(f"All SQL statements executed successfully from {sql_file}")
            except Exception as e:
                logger.error(f"Transaction failed, rolling back. Error: {e}")
                await conn.rollback()
                raise

        # # 4. Optionally kick off the KG migration (currently disabled)
        # if isImportKG:
        #     asyncio.create_task(migrate_to_kg(sql_file, db_name))


async def migrate_to_kg(file_path, db_name):
    """Migrate default relations of the MySQL database *db_name* into the Neo4j graph."""
    import time

    started_at = time.time()
    logger.info(f"Start migrating {file_path} (db: {db_name}) to KG...")
    # Relational source connection settings.
    rdb_config = {
        "dialect": "mysql",
        "driver": "mysqlconnector",
        "user": settings.mysql_user,
        "password": settings.mysql_password,
        "host": settings.mysql_host,
        "port": settings.mysql_port,
        "database": db_name,
    }
    # Neo4j target connection settings.
    neo4j_config = {
        "uri": settings.neo4j_uri,
        "user": settings.neo4j_user,
        "password": settings.neo4j_password,
    }
    DefaultRelsToGraphMigrator(rdb_config, neo4j_config).run_migration()
    elapsed = time.time() - started_at
    logger.info(f"Migrate {file_path} (db: {db_name}) to KG finished. Elapsed: {elapsed:.2f}s")


# 初始化服务实例
# er_service = SQLToNeo4jService()


# 定义接口
# @app.post("/rag/generate/er", response_model=ERGenerateResponse)
# async def generate_er(
#         request_data: ERGenerateRequest = Body(...)  # 使用Pydantic模型接收JSON
# ):
#     """
#     解析SQL文件中的表关系，生成ER图结构并导入Neo4j
#
#     流程：
#     1. 从URL下载SQL文件
#     2. 提取表名和外键关系
#     3. 生成Neo4j Cypher语句
#     4. 导入到指定的Neo4j数据库
#     """
#     try:
#         sql_url = request_data.sql_url
#         database = request_data.er_database
#         result = await er_service.process(sql_url, database)
#         return {
#             "status": "success",
#             "data": result
#         }
#     except HTTPException as e:
#         return {
#             "status": "error",
#             "message": e.detail
#         }
#     except Exception as e:
#         logger.error(f"接口处理异常: {str(e)}")
#         return {
#             "status": "error",
#             "message": f"服务器内部错误: {str(e)}"
#         }
@app.post("/rag/import-sql-data", response_model=ImportResponse)
async def import_sql_data(req: ImportRequest = Body(...)):
    """Import SQL data into ArangoDB; HTTP errors propagate, others become an error payload."""
    importer = SQLDataImporter()
    try:
        result = await importer.process(req.file_url, req.file_name, req.arango_db_hint)
        return ImportResponse(data=result)
    except HTTPException as he:
        logger.error(f"Import failed: {he.detail}")
        raise he
    except Exception as e:
        logger.exception("Unexpected error during import")
        return ImportResponse(status="error", message=str(e))

# yamler_service = YamlToNeo4jService()
# @app.post("/rag/generate/er_from_yaml", response_model=YAMLERGenerateResponse)
# async def generate_er(
#         request_data: YAMLERGenerateRequest = Body(...)  # 使用Pydantic模型接收JSON
# ):
#     try:
#         yaml_url = request_data.yaml_url
#         database = request_data.er_database
#         result = await yamler_service.process(yaml_url, database)
#         return {
#             "status": "success",
#             "data": result
#         }
#     except HTTPException as e:
#         return {
#             "status": "error",
#             "message": e.detail
#         }
#     except Exception as e:
#         logger.error(f"接口处理异常: {str(e)}")
#         return {
#             "status": "error",
#             "message": f"服务器内部错误: {str(e)}"
#         }
@app.post("/rag/generate/er_from_yaml", response_model=YAMLERGenerateResponse)
async def generate_er_from_yaml(request_data: YAMLERGenerateRequest = Body(...)):
    """Build an ER graph in ArangoDB from a YAML description file."""
    service = YamlToArangoDBService()
    try:
        graph_data = await service.process(
            file_url=request_data.file_url,
            graph_name="table_er_graph",
            file_name=request_data.file_name,
        )
        return YAMLERGenerateResponse(data=graph_data)
    except Exception as e:
        logger.error(f"ER图生成失败: {str(e)}", exc_info=True)
        return YAMLERGenerateResponse(status="error", message=str(e))

zip_service = ZipToNeo4jService()


@app.post("/rag/import/zip_to_neo4j", response_model=ZipToNeo4jResponse)
async def generate_er(request_data: ZipToNeo4jRequest = Body(...)):
    """Import the contents of a ZIP archive into Neo4j via ZipToNeo4jService."""
    try:
        await zip_service.process(request_data.zip_url)
    except HTTPException as e:
        return {"status": "error", "message": e.detail}
    except Exception as e:
        logger.error(f"接口处理异常: {str(e)}")
        return {"status": "error", "message": f"服务器内部错误: {str(e)}"}
    return {"status": "success", "data": ""}


class FileRequest(BaseModel):
    """Request body for /rag/check-zip-file-type: a downloadable file URL."""

    # Relative or absolute URL of the file to inspect.
    url: str
    # Optional explicit file name; falls back to the last URL path segment.
    file_name: Optional[str] = None


def detect_zip_file_types(zip_path: str) -> set:
    """Return the set of lowercase file extensions found inside a ZIP archive.

    Directory entries and macOS metadata ("__MACOSX/") entries are ignored,
    as are names without an extension. Raises HTTPException(400) when the
    file is not a valid ZIP archive.
    """
    try:
        with zipfile.ZipFile(zip_path, 'r') as archive:
            entries = (
                entry for entry in archive.namelist()
                if not entry.endswith('/') and not entry.startswith('__MACOSX/')
            )
            return {
                suffix
                for suffix in (os.path.splitext(entry)[1].lower() for entry in entries)
                if suffix
            }
    except zipfile.BadZipFile:
        raise HTTPException(status_code=400, detail="无效的ZIP文件格式")



@app.post("/rag/check-zip-file-type")
async def check_zip_file_type(request: FileRequest):
    """
    Detect the file types contained in a ZIP archive.

    Parameters:
    - url: file URL (relative or absolute)
    - file_name: optional file name

    Returns:
    - a single file extension string (e.g. ".md"), or an HTTP error.

    NOTE(review): when the archive holds several non-forbidden extensions,
    `next(iter(extensions))` returns an arbitrary one — set iteration order
    is not deterministic. Confirm whether callers rely on a specific pick.
    """
    try:
        # Download the ZIP to a fresh temp directory
        zip_path = await download_file(request.url, request.file_name)

        # Collect the extensions it contains
        extensions = detect_zip_file_types(zip_path)

        # Best-effort cleanup of the downloaded file and its temp directory
        try:
            os.remove(zip_path)
            os.rmdir(os.path.dirname(zip_path))
        except Exception as clean_error:
            logger.warning(f"清理临时文件失败: {str(clean_error)}")

        # Empty archive (or only directories / metadata entries)
        if not extensions:
            raise HTTPException(status_code=400, detail="ZIP文件中未找到有效文件")

        if len(extensions) > 1:
            sorted_exts = sorted(extensions)
            ext_str = '/'.join(sorted_exts)
            # Mixed archives that include yaml/yml/sql are rejected
            forbidden = {".yaml", ".yml", ".sql"}
            if extensions & forbidden:  # any overlap with the forbidden set
                raise HTTPException(
                    status_code=400,
                    detail=f"文件内包含 {ext_str} 类型文件，一次只能导入相同类型的文件"
                )

        # Return a single type
        file_type = next(iter(extensions))
        return file_type

    except HTTPException as he:
        raise he
    except Exception as e:
        logger.exception("文件类型检测失败")
        raise HTTPException(status_code=500, detail=f"服务器内部错误: {str(e)}")

@app.post("/rag/import/sql_struct", response_model=SQLImportResponse)
async def import_sql_to_arango(request_data: SQLImportRequest = Body(...)):
    """Import a SQL schema's table structure into ArangoDB as a graph."""
    importer = SQLToArangoDBService()
    try:
        graph_data = await importer.process(
            file_url=request_data.file_url,
            file_name=request_data.file_name,
            graph_name=request_data.graph_name,
        )
        return SQLImportResponse(data=graph_data)
    except Exception as e:
        logger.error(f"SQL导入失败: {str(e)}", exc_info=True)
        return SQLImportResponse(status="error", message=str(e))
# ----------------- FastAPI route -----------------
@app.post("/rag/upload_to_dify", response_model=UploadResponse)
async def upload_to_dify(request_data: UploadRequest = Body(...)):
    """Forward the upload request to Dify via DifyUploadService."""
    uploader = DifyUploadService()
    try:
        outcome = await uploader.process(request_data)
    except Exception as e:
        logger.error(f"上传失败: {str(e)}", exc_info=True)
        return UploadResponse(status="error", message=str(e))
    return UploadResponse(data=outcome)



# ========== FastAPI endpoints for DB group maintenance ==========
dao = DbGroupDAO()

@app.post("/rag/db_group/upsert", response_model=ResponseModel)
def upsert(records: List[DbGroupItem]):
    """Insert-or-update a batch of DB group records."""
    try:
        dao.upsert([item.dict() for item in records])
    except Exception as exc:
        return ResponseModel(success=False, message=str(exc))
    return ResponseModel(success=True, message="Upsert successful")

@app.post("/rag/db_group/delete", response_model=ResponseModel)
def delete(req: DeleteRequest):
    """Delete DB group records by their ids."""
    try:
        dao.delete(req.ids)
    except Exception as exc:
        return ResponseModel(success=False, message=str(exc))
    return ResponseModel(success=True, message="Delete successful")

@app.get("/rag/db_group/list", response_model=ResponseModel)
def list_all():
    """Return every DB group record."""
    try:
        records = dao.list_all()
    except Exception as exc:
        return ResponseModel(success=False, message=str(exc))
    return ResponseModel(success=True, message="Query successful", data=records)

@app.post("/rag/parse_fk_from_sql", response_model=ProcessingResponse)
async def process_sql(request: SQLProcessingRequest, background_tasks: BackgroundTasks):
    """Schedule background foreign-key parsing of SQL files.

    Returns immediately with a fresh task id; progress is tracked in the
    shared `tasks` registry updated by the background job.
    """
    task_id = str(uuid.uuid4())

    # Similarity weights forwarded to the analyzer.
    weights = {
        name: getattr(request, name)
        for name in ('w_semantic', 'w_name', 'w_type', 'w_desc')
    }

    # FastAPI runs this after the HTTP response has been sent.
    background_tasks.add_task(
        process_sql_files,
        task_id,
        request.url,
        weights,
        request.output_dir,
        request.db_groups,
        request.del_groups,
    )

    # Seed the shared in-memory task registry.
    tasks[task_id] = {
        "status": "started",
        "message": "任务已创建，等待处理",
        "output_dir": None,
        "analysis_result": None,
    }

    return ProcessingResponse(
        task_id=task_id,
        status="started",
        message="任务已创建，等待处理",
        output_dir=request.output_dir,
    )


if __name__ == "__main__":
    import uvicorn

    # Run the API directly with uvicorn when executed as a script.
    uvicorn.run(app, host="0.0.0.0", port=80)
