import os
import zipfile
import tempfile
import aiohttp
import uuid
import datetime
from fastapi import FastAPI, HTTPException, BackgroundTasks
from pydantic import BaseModel, Field, field_validator, model_validator
from typing import Optional, List
import asyncio

from starlette.middleware.cors import CORSMiddleware

from app.config import Settings
from app.convert_to_yaml.tablerecovery.recovery import TableRecovery
from app.gen_db_er.import_sql_to_arangodb import SQLDataImporter
from app.import_sql_to_graph.sql_struct_to_arango import SQLToArangoDBService

# Assumes these Settings fields are already configured, e.g.:
# class Settings:
#     dify_prefix = ""
#     temp_dir_root = "/tmp"


# Module-level singletons: configuration and the FastAPI application.
settings = Settings()

app = FastAPI()


# Enable CORS for all origins, methods, and headers.
# NOTE(review): allow_origins=["*"] combined with allow_credentials=True is very
# permissive — confirm this is acceptable for the deployment environment.
app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)
class DBGroup(BaseModel):
    """A named grouping of databases whose SQL files are merged into one file."""

    # 0 marks a new (not yet persisted) group; merge_sql_files only merges id == 0.
    id: Optional[int] = 0
    # Target group name; a leading "db_" prefix is stripped when naming the merged file.
    group_name: str
    # Comma-separated database names (file stems), e.g. "db_a, db_b"; None = no members.
    databases: Optional[str] = None
# Request model
class SQLProcessingRequest(BaseModel):
    """Request payload for the /rag/process-sql endpoint.

    The four similarity weights must sum to 1.0 (validated below).
    """

    url: str = Field(..., description="SQL文件或ZIP文件的URL")
    w_semantic: float = Field(0.25, ge=0, le=1, description="语义相似度权重")
    w_name: float = Field(0.45, ge=0, le=1, description="名称匹配权重")
    w_type: float = Field(0.20, ge=0, le=1, description="类型兼容性权重")
    w_desc: float = Field(0.10, ge=0, le=1, description="描述相似度权重")
    output_dir: Optional[str] = Field(None, description="输出目录，如未提供则使用临时目录")
    # Bug fix: default to None so callers may omit these fields. Under Pydantic v2
    # an Optional annotation *without* a default is still a required field.
    db_groups: Optional[list[DBGroup]] = None
    del_groups: Optional[list[str]] = None

    # Single-field validation
    @field_validator("url")
    @classmethod
    def validate_url(cls, v: str):
        """Accept only URLs pointing at a .sql or .zip file."""
        if not (v.endswith(".sql") or v.endswith(".zip")):
            raise ValueError("URL必须指向.sql或.zip文件")
        return v

    # Cross-field validation
    @model_validator(mode="after")
    def check_weights(self):
        """Ensure the four weights sum to 1 (within a small float tolerance)."""
        total = self.w_semantic + self.w_name + self.w_type + self.w_desc
        if abs(total - 1.0) > 0.001:
            raise ValueError("所有权重之和必须等于1")
        return self


# Response model
class ProcessingResponse(BaseModel):
    """Status payload returned by /rag/process-sql and /task-status/{task_id}."""

    task_id: str
    status: str
    message: str
    output_dir: Optional[str] = None
    analysis_result: Optional[dict] = None  # populated once SQL analysis completes


# In-memory task-state registry keyed by task_id
# (in production this should live in a database).
tasks = {}


async def download_file(url: str, file_path: str) -> str:
    """Download `url` to `file_path` (streamed in 1 MiB chunks) and return the path.

    URLs without a scheme that don't already carry the configured prefix are
    resolved against settings.dify_prefix.

    Raises:
        HTTPException(400): the server answered with a non-200 status.
        HTTPException(500): any other download/write failure.
    """
    # Prepend the configured prefix only for relative URLs that lack it.
    # (Equivalent to the previous condition, without the always-true
    # startswith("") case when the prefix is empty.)
    if settings.dify_prefix and not url.startswith("http") and not url.startswith(settings.dify_prefix):
        full_url = settings.dify_prefix.rstrip("/") + "/" + url.lstrip("/")
    else:
        full_url = url

    try:
        async with aiohttp.ClientSession() as session:
            async with session.get(full_url) as resp:
                if resp.status != 200:
                    raise HTTPException(status_code=400, detail=f"下载失败，HTTP {resp.status}")
                with open(file_path, "wb") as f:
                    while True:
                        chunk = await resp.content.read(1024 * 1024)
                        if not chunk:
                            break
                        f.write(chunk)
        return file_path
    except HTTPException:
        # Bug fix: let the 400 above propagate instead of being swallowed by
        # the broad handler below and re-wrapped as a 500.
        raise
    except Exception as e:
        raise HTTPException(status_code=500, detail=f"下载失败: {e}")

async def get_jwt_token() -> str:
    """Authenticate against ArangoDB's /_open/auth endpoint and return a JWT."""
    auth_payload = {
        "username": settings.arango_user,
        "password": settings.arango_password,
    }
    async with aiohttp.ClientSession() as session:
        async with session.post(
            f"{settings.arango_host}/_open/auth",
            json=auth_payload,
        ) as resp:
            if resp.status != 200:
                text = await resp.text()
                raise Exception(f"获取 JWT 失败: {resp.status}, {text}")
            body = await resp.json()
            return body["jwt"]


async def delete_arangodb_databases(del_groups: List[str]):
    """Drop the ArangoDB databases derived from each name in `del_groups`.

    Each entry is normalized (a leading "db_" is stripped) and mapped to the
    real database name "db_structure_<name>". Failures are printed, never raised.
    """
    if not del_groups:
        return

    # Authenticate once and reuse the JWT for every delete request.
    token = await get_jwt_token()
    auth_headers = {
        "Authorization": f"bearer {token}",
        "accept": "*/*"
    }
    base_url = settings.arango_host
    async with aiohttp.ClientSession(headers=auth_headers) as session:
        for raw_name in del_groups:
            name = raw_name[3:] if raw_name.startswith("db_") else raw_name
            db_to_delete = f"db_structure_{name}"

            delete_url = f"{base_url}/_db/_system/_api/database/{db_to_delete}"
            try:
                async with session.delete(delete_url) as resp:
                    if resp.status == 200:
                        print(f"✅ 成功删除数据库: {db_to_delete}")
                    elif resp.status == 404:
                        print(f"⚠️ 数据库不存在: {db_to_delete}")
                    else:
                        text = await resp.text()
                        print(f"❌ 删除数据库失败: {db_to_delete}, 状态码: {resp.status}, 响应: {text}")
            except Exception as e:
                print(f"🔥 删除数据库时出错: {db_to_delete}, 错误: {e}")


def merge_sql_files(db_groups: List[DBGroup], ddl_dir: str, tempdir: str) -> str:
    """Merge grouped SQL files into <tempdir>/merged and return that directory.

    Only groups with id == 0 (new groups) and a non-empty `databases` spec are
    merged; each produces "<group_name>.sql" (a leading "db_" prefix is
    stripped from the group name). SQL files not claimed by any group are
    copied over unchanged. If `db_groups` is empty, `ddl_dir` is returned as-is.
    """
    import shutil  # hoisted out of the copy loop

    if not db_groups:
        return ddl_dir

    merged_dir = os.path.join(tempdir, "merged")
    os.makedirs(merged_dir, exist_ok=True)

    # Map database name (file stem) -> SQL filename for O(1) lookups.
    sql_by_base = {
        os.path.splitext(f)[0]: f
        for f in os.listdir(ddl_dir)
        if f.endswith('.sql')
    }

    def _group_dbs(group):
        """Database names for a mergeable group, or [] if the group is skipped."""
        if not group.databases or group.id != 0:
            return []
        return [db.strip() for db in group.databases.split(',')]

    # Every database name claimed by some (mergeable) group.
    grouped_dbs = {db for group in db_groups for db in _group_dbs(group)}

    # Concatenate each group's member files into one "<name>.sql".
    for group in db_groups:
        content = []
        for db in _group_dbs(group):
            sql_file = sql_by_base.get(db)
            if sql_file is None:
                print(f"警告: 数据库 {db} 对应的SQL文件未找到")
                continue
            with open(os.path.join(ddl_dir, sql_file), 'r', encoding='utf-8') as f:
                content.append(f.read())
        if content:
            group_name = group.group_name
            if group_name.startswith("db_"):
                group_name = group_name[3:]
            merged_file_path = os.path.join(merged_dir, f"{group_name}.sql")
            with open(merged_file_path, 'w', encoding='utf-8') as f:
                f.write("\n".join(content))

    # Preserve files that no group claimed.
    for sql_base, sql_file in sql_by_base.items():
        if sql_base not in grouped_dbs:
            shutil.copy2(os.path.join(ddl_dir, sql_file),
                         os.path.join(merged_dir, sql_file))

    return merged_dir

async def process_sql_files(task_id: str, url: str, weights: dict, output_dir: str = None, db_groups: List[DBGroup] = None, del_groups: List[str] = None):
    """Background pipeline: download -> (unzip / merge) -> recover FKs -> analyze.

    Progress, results and errors are reported through the module-level `tasks`
    registry under `task_id`; this coroutine swallows all exceptions and
    records them in the task entry instead of raising.

    Args:
        task_id: Key into the `tasks` registry for status updates.
        url: Location of the input .sql or .zip file.
        weights: Mapping with keys w_semantic / w_name / w_type / w_desc.
        output_dir: Target directory for recovered SQL; a temp subdirectory
            is created when None.
        db_groups: Optional grouping spec; matching SQL files are merged.
        del_groups: ArangoDB database names to drop before processing.
    """
    try:
        tasks[task_id] = {"status": "processing", "message": "开始下载文件"}

        # Drop any ArangoDB databases the caller asked to remove first.
        if del_groups:
            await delete_arangodb_databases(del_groups)

        # Create a unique per-task scratch directory (timestamp + random suffix).
        timestamp = datetime.datetime.now().strftime("%Y%m%d_%H%M%S")
        unique = uuid.uuid4().hex[:8]
        tempdir = os.path.join(settings.temp_dir_root, f"sql_processing_{timestamp}_{unique}")
        os.makedirs(tempdir, exist_ok=True)

        # Download the input file into the scratch directory.
        filename = url.split("/")[-1]
        file_path = os.path.join(tempdir, filename)
        await download_file(url, file_path)
        tasks[task_id]["message"] = "文件下载完成"

        # Normalize the input into a directory of .sql files (ddl_dir).
        ddl_dir = tempdir
        if file_path.endswith('.zip'):
            zip_dir = os.path.join(tempdir, "extracted")
            os.makedirs(zip_dir, exist_ok=True)
            with zipfile.ZipFile(file_path, 'r') as zip_ref:
                zip_ref.extractall(zip_dir)
            ddl_dir = zip_dir  # use the extracted directory as the DDL dir
            tasks[task_id]["message"] = "ZIP文件解压完成"
        else:
            # For a single SQL file, move it into its own directory first.
            sql_dir = os.path.join(tempdir, "sql_files")
            os.makedirs(sql_dir, exist_ok=True)
            os.rename(file_path, os.path.join(sql_dir, filename))
            ddl_dir = sql_dir
            tasks[task_id]["message"] = "SQL文件准备完成"

        # Merge SQL files when db_groups were provided.
        if db_groups:
            ddl_dir = merge_sql_files(db_groups, ddl_dir, tempdir)
            tasks[task_id]["message"] = "SQL文件合并完成"

        # Default the output directory to a subfolder of the scratch dir.
        # NOTE(review): a caller-supplied output_dir is NOT created here —
        # confirm callers guarantee it exists.
        if output_dir is None:
            output_dir = os.path.join(tempdir, "output")
            os.makedirs(output_dir, exist_ok=True)

        tasks[task_id]["message"] = "开始处理SQL文件"

        # Run foreign-key recovery over the DDL directory with the given weights.
        recovery = TableRecovery(
            ddl_dir=ddl_dir,
            output_dir=output_dir,
            with_data=True,
            w_semantic=weights['w_semantic'],
            w_name=weights['w_name'],
            w_type=weights['w_type'],
            w_desc=weights['w_desc']
        )
        recovery()

        # After TableRecovery finishes, analyze the recovered SQL structure.
        tasks[task_id]["message"] = "SQL外键恢复完成，开始分析SQL结构"

        # Analyzer that imports the SQL structure into ArangoDB.
        sql_analyzer = SQLToArangoDBService()

        # Collect the recovered .sql files produced under output_dir.
        recovered_files = []
        for root, dirs, files in os.walk(output_dir):
            for file in files:
                if file.endswith('.sql'):
                    recovered_files.append(os.path.join(root, file))

        if not recovered_files:
            tasks[task_id]["message"] = "警告：未找到恢复后的SQL文件"
            analysis_result = {}
        else:
            # With several SQL files, bundle them into a ZIP for analysis.
            if len(recovered_files) > 1:
                zip_path = os.path.join(tempdir, "recovered_sql.zip")
                with zipfile.ZipFile(zip_path, 'w') as zipf:
                    for sql_file in recovered_files:
                        zipf.write(sql_file, os.path.basename(sql_file))
                # Analyze the bundled ZIP.
                analysis_result = sql_analyzer.process_file(zip_path, "table_er_graph")
            else:
                # Analyze the single SQL file directly.
                analysis_result = sql_analyzer.process_file(recovered_files[0], "table_er_graph")

        tasks[task_id]["status"] = "completed"
        tasks[task_id]["message"] = "处理完成"
        tasks[task_id]["output_dir"] = output_dir
        tasks[task_id]["analysis_result"] = analysis_result  # store the analysis result

    except Exception as e:
        tasks[task_id]["status"] = "error"
        tasks[task_id]["message"] = f"处理失败: {str(e)}"
        # Keep the full traceback for debugging.
        import traceback
        tasks[task_id]["error_details"] = traceback.format_exc()

@app.post("/rag/process-sql", response_model=ProcessingResponse)
async def process_sql(request: SQLProcessingRequest, background_tasks: BackgroundTasks):
    """Create a processing task for the given SQL/ZIP URL and return its id."""
    task_id = str(uuid.uuid4())

    # Register the task state up front so it exists before anything else runs.
    tasks[task_id] = {
        "status": "started",
        "message": "任务已创建，等待处理",
        "output_dir": None,
        "analysis_result": None
    }

    # Bundle the four similarity weights for the pipeline.
    weights = {
        'w_semantic': request.w_semantic,
        'w_name': request.w_name,
        'w_type': request.w_type,
        'w_desc': request.w_desc
    }

    # The heavy pipeline runs after the response has been sent.
    background_tasks.add_task(
        process_sql_files,
        task_id,
        request.url,
        weights,
        request.output_dir,
        request.db_groups,
        request.del_groups
    )

    return ProcessingResponse(
        task_id=task_id,
        status="started",
        message="任务已创建，等待处理",
        output_dir=request.output_dir
    )

@app.get("/task-status/{task_id}")
async def get_task_status(task_id: str):
    """Look up a previously created task and report its current state."""
    task_info = tasks.get(task_id)
    if task_info is None:
        raise HTTPException(status_code=404, detail="任务不存在")

    return ProcessingResponse(
        task_id=task_id,
        status=task_info["status"],
        message=task_info["message"],
        output_dir=task_info.get("output_dir"),
        analysis_result=task_info.get("analysis_result")
    )

if __name__ == "__main__":
    # Local development entry point.
    import uvicorn

    uvicorn.run(app, host="localhost", port=8080)