import asyncio
from datetime import datetime

from fastapi import APIRouter, UploadFile, File, HTTPException, Depends, Query, Form,BackgroundTasks
from typing import List, Optional
from pydantic import BaseModel
from fastapi.responses import StreamingResponse

from auth_service.auths import verify_token
from file.services import upload, delete_local_file, file_to_chunks
from file.web_load import web_crawl, web_chunk
from tables.file import Files
from tables.knowledge import KG
from tables.web import Webs
from tables.vector import Vectors
from utils.main import str_to_hash
from fastapi.responses import FileResponse
import os
from fastapi.concurrency import run_in_threadpool
# Router for file / web-source / chunk management of RAG knowledge bases,
# mounted under /rag/file.
router = APIRouter(
    prefix="/rag/file",
    tags=["file"],
    responses={404: {"description": "Not found"}}
)

class FileStopInfo(BaseModel):
    """Request body for enabling/disabling a single file or web record."""
    id: int
    stop: bool = False
    type: str  # source type: "0" = file, "1" = web

class FileChunks(BaseModel):
    """Request body for chunk listing / source deletion.

    Exactly one of ``filename`` (uploaded-file source) or ``single_url``
    (crawled-page source) is expected by the endpoints consuming this model.
    Fields that default to None are annotated Optional so the declared type
    matches the default (``str = None`` was a typing defect).
    """
    filename: Optional[str] = None    # file source name; None for web sources
    rag_id: str                       # owning knowledge-base id
    base_url: Optional[str] = None    # site root the single_url belongs to
    single_url: Optional[str] = None  # one crawled page url
    limit: int = 10                   # page size for chunk listing
    offset: int = 0                   # page start for chunk listing
    keyword: Optional[str] = None     # optional keyword filter for chunks

class ChunkInfo(BaseModel):
    """Request body identifying one chunk inside a knowledge base."""
    # chunk id in the vector store
    id: str
    text: str
    rag_id: str
    # NOTE(review): filename and single_url are both required here although
    # a chunk normally belongs to only one kind of source — confirm callers
    # always send both (possibly empty) values.
    filename: str
    single_url: str
    description: str = ""


@router.post("/upload", summary="文件上传")
async def upload_api(
        files: List[UploadFile] = File(...),
        rag_id: str = Form(),
        current_user=Depends(verify_token)
):
    """Upload one or more files into the knowledge base ``rag_id``.

    Raises:
        HTTPException: propagated untouched if the upload service raised one;
            otherwise a 500 wrapping the original error message.
    """
    try:
        await upload(files, rag_id, current_user)
        return {"code": 200, "message": "文件上传成功"}
    except HTTPException:
        # Bug fix: don't collapse service-raised HTTPExceptions (and their
        # status codes) into a generic 500.
        raise
    except Exception as e:
        raise HTTPException(status_code=500, detail=f"文件上传失败: {str(e)}")


@router.get("/get_files", summary="获取指定知识库的文件列表")
async def get_files_api(
        rag_id: str,
        type: int = 0,
        limit: int = 10,
        offset: int = 0,
        query: Optional[str] = None,
        current_user=Depends(verify_token)
):
    """List the sources of a knowledge base, paginated in memory.

    ``type`` selects the source kind: 0 = uploaded files only, 1 = crawled
    web sites only, any other value = both lists merged. ``query`` is an
    optional name filter passed through to the table layer.

    The three near-identical branches of the original implementation (plus a
    dead commented-out block and an unused ``result`` dict) are collapsed
    into two additive conditions with a single pagination/return path.
    """
    try:
        files = []
        total_count = 0
        if type != 1:  # uploaded files: type 0 or the merged view
            total_count += Files.get_files_count_by_rag_id(current_user.id, rag_id, query)
            files += Files.get_files_by_rag_id(current_user.id, rag_id, query)
        if type != 0:  # crawled sites: type 1 or the merged view
            total_count += Webs.get_webs_count_by_rag_id(current_user.id, rag_id, query)
            files += Webs.get_webs_by_rag_id(current_user.id, rag_id, query)
        # Paginate the (possibly merged) list after the fact.
        paginated_files = files[offset:offset + limit]
        return {"code": 200, "message": "文件列表获取成功", "total_count": total_count,
                "data": paginated_files}
    except Exception as e:
        raise HTTPException(status_code=500, detail=f"文件列表获取失败: {str(e)}")


@router.delete("/delete", summary="删除文件")
async def delete_file_api(
        body: FileChunks,
        current_user=Depends(verify_token)
):
    """Delete an uploaded file (``body.filename``) or a single crawled page
    (``body.single_url``), plus its vector chunks when they exist."""
    try:
        if not body.single_url:
            # Uploaded file: remove the DB record, then the file on disk.
            fileinfo = Files.get_file(current_user.id, body.filename, body.rag_id)
            res = Files.delete_file(current_user.id, body.filename, body.rag_id)
            if not res:
                raise Exception("文件信息删除失败")
            is_chunk = fileinfo.is_chunk
            res = delete_local_file(fileinfo.filepath)
            if not res:
                raise Exception("本地文件删除失败")
            source = body.filename
        else:
            # Crawled page: drop the url from the site's stored url list.
            webinfo = Webs.get_web(current_user.id, body.base_url, body.rag_id)
            is_chunk = webinfo.is_chunk
            # Bug fix: rebuild the space-separated list instead of
            # str.replace(f" {url}", ""), which missed the url when it was
            # the FIRST entry (no leading space) and left it in the list.
            sub_urls = " ".join(
                u for u in webinfo.sub_urls.split(" ")
                if u and u != body.single_url
            )
            res = Webs.update_single(current_user.id, body.base_url, body.rag_id, sub_urls)
            if not res:
                raise Exception("网页信息删除失败")
            source = body.single_url
        # Remove the vector chunks only if the source was ever chunked.
        if is_chunk:
            collection_name = str_to_hash(body.rag_id)
            await Vectors.delete_chunks(collection_name, source)

        return {"code": 200, "message": "文件删除成功"}
    except Exception as e:
        raise HTTPException(status_code=500, detail=f"文件删除失败: {str(e)}")


@router.post("/file_chunks", summary="获取文档分块信息")
async def get_file_chunks_api(
        body: FileChunks,
        current_user=Depends(verify_token)
):
    """Return the stored chunks of one source (file or crawled page)."""
    try:
        source = body.filename or body.single_url
        return Vectors.get_chunks(
            str_to_hash(body.rag_id),
            source,
            body.offset,
            body.limit,
            body.keyword,
        )
    except Exception as e:
        raise HTTPException(status_code=500, detail=f"文件分块信息获取失败: {str(e)}")


@router.put("/update_chunk", summary="编辑分块内容")
async def update_chunk_api(
        body: ChunkInfo,
        current_user=Depends(verify_token)
):
    """Edit the text/description of one chunk and re-embed it with the
    knowledge base's configured embedding model."""
    try:
        kg = KG.get_knowledge(current_user.id, body.rag_id)
        await Vectors.update_chunk(
            str_to_hash(body.rag_id),
            body.id,
            body.text,
            body.description,
            kg.embed,
        )
        return {"code": 200, "message": "文件分块内容修改成功"}
    except Exception as e:
        raise HTTPException(status_code=500, detail=f"文件分块内容修改失败: {str(e)}")


@router.delete("/delete_chunk", summary="删除块")
async def delete_chunk_api(
        body: ChunkInfo,
        current_user=Depends(verify_token)
):
    """Delete one chunk from the vector store by its id."""
    try:
        Vectors.delete_chunk_by_id(
            collection_name=str_to_hash(body.rag_id),
            chunk_id=body.id,
        )
        return {"code": 200, "message": "文本块删除成功"}
    except Exception as e:
        raise HTTPException(status_code=500, detail=f"文本块删除失败: {str(e)}")


class CreateChunk(BaseModel):
    """Request body for inserting a new text chunk into a knowledge base.

    ``filename``/``single_url`` default to None, so they are annotated
    Optional to match (``str = None`` was a typing defect).
    """
    text: str
    rag_id: str
    description: str = ""
    filename: Optional[str] = None    # set for file sources
    single_url: Optional[str] = None  # set for web-page sources
@router.put("/create_chunk", summary="插入文本块")
async def create_chunk_api(
        body: CreateChunk,
        current_user=Depends(verify_token)
):
    """Insert a new text chunk for a file or web-page source.

    Requires the knowledge base to be configured (kg.is_set).
    """
    try:
        kg = KG.get_knowledge(current_user.id, body.rag_id)
        if not kg.is_set:
            raise Exception("知识库未设置")
        source = body.filename if body.filename else body.single_url
        collection_name = str_to_hash(body.rag_id)
        # Reuse the knowledge-base record fetched above instead of issuing a
        # second identical KG.get_knowledge query just for the embed model.
        await Vectors.create_chunk(
            collection_name=collection_name,
            filename=source,
            text=body.text,
            description=body.description,
            embed=kg.embed
        )
        return {"code": 200, "message": "文本块创建成功"}
    except Exception as e:
        raise HTTPException(status_code=500, detail=f"创建文本块失败: {str(e)}")


@router.put("/stop_chunk", summary="停用分块")
async def stop_chunk_api(
        body: ChunkInfo,
        current_user=Depends(verify_token)
):
    """Disable one chunk so it is excluded from retrieval."""
    try:
        Vectors.stop_chunk(str_to_hash(body.rag_id), body.id)
        return {"code": 200, "message": "文本块停用成功"}
    except Exception as e:
        raise HTTPException(status_code=500, detail=f"文本块停用失败: {str(e)}")


class WebInfo(BaseModel):
    """Request body for creating/updating/deleting a crawl site."""
    base_url: str
    rag_id: str
    # NOTE(review): presumably urls / suffixes to exclude from crawling
    # (passed through to web_crawl) — confirm against web_crawl's contract.
    dis_url: str = ""
    dis_suffix: str = ""


@router.post("/create_web", summary="创建站点")
async def create_web_api(
        body: WebInfo,
        current_user=Depends(verify_token)
):
    """Register a new crawl site for a knowledge base."""
    try:
        # Creation timestamp, formatted as yyyy-MM-dd HH:mm:ss.
        created_at = datetime.now().strftime('%Y-%m-%d %H:%M:%S')
        ok = Webs.insert_new_web(
            current_user.id,
            body.base_url,
            body.rag_id,
            body.dis_url,
            body.dis_suffix,
            created_at,
        )
        if not ok:
            raise Exception("站点创建失败")
        return {"code": 200, "message": "站点创建成功"}
    except Exception as e:
        raise HTTPException(status_code=500, detail=f"站点创建失败: {str(e)}")


@router.put("/update_web", summary="修改站点")
async def update_web_api(
        body: WebInfo,
        current_user=Depends(verify_token)
):
    """Update a site's crawl settings, first dropping any existing chunks so
    they can be rebuilt under the new settings."""
    try:
        webinfo = Webs.get_web(current_user.id, body.base_url, body.rag_id)
        # If chunks were already created, delete them per stored page url.
        if webinfo.is_chunk:
            collection_name = str_to_hash(body.rag_id)
            # Robustness: skip empty tokens produced by leading/trailing or
            # doubled spaces in the stored url list, so we never ask the
            # vector store to delete chunks for an empty source name.
            for source in webinfo.sub_urls.split(" "):
                if source:
                    await Vectors.delete_chunks(collection_name, source)
        res = Webs.update_web(current_user.id, body.base_url, body.rag_id, body.dis_url, body.dis_suffix)
        if not res:
            raise Exception("站点修改失败")
        return {"code": 200, "message": "站点修改成功"}
    except Exception as e:
        raise HTTPException(status_code=500, detail=f"站点修改失败: {str(e)}")


@router.delete("/delete_web", summary="删除站点")
async def delete_web_api(
        body: WebInfo,
        current_user=Depends(verify_token)
):
    """Delete a crawl site and all vector chunks built from its pages."""
    try:
        webinfo = Webs.get_web(current_user.id, body.base_url, body.rag_id)
        # If chunks were already created, delete them per stored page url.
        if webinfo.is_chunk:
            collection_name = str_to_hash(body.rag_id)
            # Robustness: skip empty tokens produced by leading/trailing or
            # doubled spaces in the stored url list, so we never ask the
            # vector store to delete chunks for an empty source name.
            for source in webinfo.sub_urls.split(" "):
                if source:
                    await Vectors.delete_chunks(collection_name, source)
        res = Webs.delete_base_url(current_user.id, body.base_url, body.rag_id)
        if not res:
            raise Exception("站点删除失败")
        return {"code": 200, "message": "站点删除成功"}
    except Exception as e:
        raise HTTPException(status_code=500, detail=f"站点删除失败: {str(e)}")


class GetWeb(BaseModel):
    """Request body identifying one crawl site of a knowledge base."""
    base_url: str
    rag_id: str

@router.post("/get_web", summary="获取站点下爬取的网页列表")
async def get_web_api(
        body: GetWeb,
        current_user=Depends(verify_token)
):
    """Return the list of page urls already crawled for a site."""
    try:
        webinfo = Webs.get_web(current_user.id, body.base_url, body.rag_id)
        if not webinfo.sub_urls:
            raise Exception("网站还未爬取")
        return {
            "code": 200,
            "message": "网页列表获取成功",
            "data": webinfo.sub_urls.split(" "),
        }
    except Exception as e:
        raise HTTPException(status_code=500, detail=f"网页列表获取失败: {str(e)}")


@router.post("/crawl_web", summary="爬取web站点网页")
async def crawl_web_api(
        body: GetWeb,
        current_user=Depends(verify_token)
):
    """Crawl a registered site for new pages, chunk them immediately when the
    site was already chunked, and prepend the new urls to the stored
    space-separated url list."""
    try:
        webinfo = Webs.get_web(current_user.id, body.base_url, body.rag_id)
        # NOTE(review): dis_url and the already-crawled sub_urls are joined
        # with NO separator here — presumably web_crawl accepts one combined
        # exclusion string; confirm against web_crawl's expected format.
        dis_url = f"{webinfo.dis_url}{webinfo.sub_urls}"
        sub_urls = web_crawl(webinfo.base_url, webinfo.dis_suffix, dis_url)
        kg = KG.get_knowledge(current_user.id, body.rag_id)
        # If the site was chunked before, chunk the newly crawled pages too.
        if webinfo.is_chunk:
            await web_chunk(
                collection_name=str_to_hash(body.rag_id),
                userid=current_user.id,
                base_url=webinfo.base_url,
                rag_id=body.rag_id,
                url_list=sub_urls,
                chunk_type=kg.chunk_type,
                embed=kg.embed,
                llm=kg.llm
            )
        # Prepend the newly discovered links to the stored url list.
        Webs.update_single(current_user.id, body.base_url, body.rag_id, f"{' '.join(sub_urls)} {webinfo.sub_urls}")
        return {"code": 200, "message": "站点爬取成功"}
    except Exception as e:
        raise HTTPException(status_code=500, detail=f"站点爬取失败: {str(e)}")


class FileToChunks(BaseModel):
    """Request body identifying one file of a knowledge base to chunk."""
    rag_id: str
    filename: str

async def create_chunk_task(current_user_id: int, body: FileToChunks):
    """Background task: build (or rebuild) the vector chunks for one file.

    Blocking DB lookups are pushed to the threadpool so the event loop is
    not stalled while the task runs.
    """
    try:
        kg = await run_in_threadpool(KG.get_knowledge, current_user_id, body.rag_id)
        if not kg.is_set:
            raise Exception("知识库未设置")
        fileinfo = await run_in_threadpool(Files.get_file, current_user_id, body.filename, body.rag_id)
        collection_name = str_to_hash(body.rag_id)
        # If chunks were created before, delete them so they are rebuilt.
        if fileinfo.is_chunk:
            await Vectors.delete_chunks(collection_name, fileinfo.filename)
        await file_to_chunks(
            collection_name=collection_name,
            userid=current_user_id,
            filepath=fileinfo.filepath,
            rag_id=body.rag_id,
            chunk_type=kg.chunk_type,
            embed=kg.embed,
            llm=kg.llm
        )
    except Exception as e:
        # Background task: there is no request to fail — report and move on.
        print(f"创建文本块失败: {str(e)}")


@router.post("/file_to_chunks", summary="文件分块")
async def file_to_chunks_api(
        body: FileToChunks,
        background_tasks: BackgroundTasks,
        current_user=Depends(verify_token)
):
    """Schedule chunking of one file as a background task and return at once."""
    try:
        background_tasks.add_task(create_chunk_task, current_user.id, body)
        return {"code": 200, "message": "文本块创建任务已提交"}
    except Exception as e:
        raise HTTPException(status_code=500, detail=f"文件分块失败: {str(e)}")


@router.post("/file_chunk_progress", summary="文件分块进度")
async def file_chunk_progress_api(
        body: FileToChunks,
        current_user=Depends(verify_token)
):
    """Return the chunking progress of one file."""
    try:
        progress = await Files.get_file_chunk_progress(
            userid=current_user.id,
            filename=body.filename,
            rag_id=body.rag_id,
        )
        return {"code": 200, "message": "文件分块进度", "data": progress}
    except Exception as e:
        raise HTTPException(status_code=500, detail=f"文件分块进度失败: {str(e)}")

class FilesToChunks(BaseModel):
    """Request body: chunk every file of the given knowledge base."""
    rag_id: str

@router.post("/files_to_chunks", summary="多文件分块")
async def files_to_chunks_api(
        body: FilesToChunks,
        current_user=Depends(verify_token)
):
    """Chunk every not-yet-chunked file of a knowledge base concurrently.

    Requires the knowledge base to be configured (kg.is_set). All files are
    chunked in parallel with asyncio.gather.
    """
    try:
        kg = KG.get_knowledge(current_user.id, body.rag_id)
        if not kg.is_set:
            raise Exception("知识库未设置")
        files = Files.get_files_by_rag_id(userid=current_user.id, rag_id=body.rag_id)
        collection_name = str_to_hash(body.rag_id)

        # Bug fix: use truthiness instead of `file.is_chunk is False` — the
        # identity check silently skipped files whose flag came back as
        # 0/None from the DB layer instead of the literal False singleton.
        tasks = [
            file_to_chunks(
                collection_name=collection_name,
                userid=current_user.id,
                filepath=file.filepath,
                rag_id=body.rag_id,
                chunk_type=kg.chunk_type,
                embed=kg.embed,
                llm=kg.llm
            )
            for file in files if not file.is_chunk
        ]
        # Run all chunking coroutines concurrently (the original wrapped this
        # in a needless inner coroutine).
        await asyncio.gather(*tasks)
        return {"code": 200, "message": "多文件分块成功"}

    except Exception as e:
        raise HTTPException(status_code=500, detail=f"多文件分块失败: {str(e)}")


@router.post("/webs_to_chunks", summary="站点分块")
async def webs_to_chunks_api(
        body: GetWeb,
        current_user=Depends(verify_token)
):
    """Chunk every crawled page of a site into the knowledge base."""
    try:
        kg = KG.get_knowledge(current_user.id, body.rag_id)
        if not kg.is_set:
            raise Exception("知识库未设置")
        webinfo = Webs.get_web(userid=current_user.id, base_url=body.base_url, rag_id=body.rag_id)
        url_list = webinfo.sub_urls.strip().split(" ")
        await web_chunk(
            collection_name=str_to_hash(body.rag_id),
            userid=current_user.id,
            base_url=body.base_url,
            url_list=url_list,
            rag_id=body.rag_id,
            chunk_type=kg.chunk_type,
            embed=kg.embed,
            llm=kg.llm
        )
        return {"code": 200, "message": "站点分块成功"}
    except Exception as e:
        raise HTTPException(status_code=500, detail=f"网页分块失败: {str(e)}")

@router.post("/webs_chunk_progress", summary="站点分块进度")
async def webs_chunk_progress_api(
        body: GetWeb,
        current_user=Depends(verify_token)
):
    """Return the chunking progress of one crawl site."""
    try:
        progress = await Webs.get_webs_chunk_progress(
            userid=current_user.id,
            base_url=body.base_url,
            rag_id=body.rag_id,
        )
        return {"code": 200, "message": "站点分块进度", "data": progress}
    except Exception as e:
        raise HTTPException(status_code=500, detail=f"站点分块进度失败: {str(e)}")

@router.post("/update_file_status", summary="修改文件状态")
async def update_file_status(
        body: FileStopInfo,
        current_user=Depends(verify_token)
):
    """Enable or disable a single record: type "0" targets a file row,
    anything else targets a web row."""
    try:
        if body.type == "0":
            ok = Files.update_file_status(file_id=body.id, stop=body.stop)
        else:
            ok = Webs.update_web_status(web_id=body.id, stop=body.stop)
        if not ok:
            raise Exception("文件停用失败")
        return {"code": 200, "message": "文件停用成功"}
    except Exception as e:
        raise HTTPException(status_code=500, detail=f"文件停用失败: {str(e)}")

@router.get("/download_file", summary="下载文件")
async def download_file(
        file_id: int,
        current_user=Depends(verify_token)
):
    """Stream a stored file back to the client as an attachment.

    Raises:
        HTTPException: 404 when the record or the file on disk is missing,
            500 for any other failure.
    """
    try:
        res = Files.get_file_by_id(file_id=file_id)
        if not res:
            raise HTTPException(status_code=404, detail="File not found")
        file_path = res.filepath
        if not os.path.isfile(file_path):
            raise HTTPException(status_code=404, detail="File not found")
        return FileResponse(file_path, media_type='application/octet-stream', filename=os.path.basename(file_path))
    except HTTPException:
        # Bug fix: without this clause the 404s raised above were caught by
        # the generic handler below and re-raised as 500s.
        raise
    except Exception as e:
        raise HTTPException(status_code=500, detail=str(e))

@router.delete("/delete_file_by_id", summary="根据文件id删除文件")
async def delete_file_by_id(
        file_id: int,
        current_user=Depends(verify_token)
):
    """Delete a file record by id, removing the file on disk first.

    Raises:
        HTTPException: 404 when no record exists for ``file_id``,
            500 for any other failure.
    """
    try:
        fileinfo = Files.get_file_by_id(file_id=file_id)
        if not fileinfo:
            raise HTTPException(status_code=404, detail="File not found")
        # Remove the file on disk before dropping the DB record.
        res_local = delete_local_file(fileinfo.filepath)
        if not res_local:
            raise Exception("本地文件删除失败")
        res_db = Files.del_file_by_id(file_id)
        if not res_db:
            raise Exception("db文件删除失败")
        return {"code": 200, "message": "文件删除成功"}
    except HTTPException:
        # Bug fix: keep the 404 raised above instead of wrapping it into a
        # generic 500 by the handler below.
        raise
    except Exception as e:
        raise HTTPException(status_code=500, detail=f"文件删除失败: {str(e)}")