import os
import shutil
from fastapi import APIRouter, HTTPException, Depends, UploadFile, File, Query
from typing import List, Optional

from config import UPLOAD_DIR
from database.database import Document, VectorEmbedding, KnowledgeBase
from database.database_function import get_db
from api.utils import store_embeddings_to_db_batch, get_document_info, embed_chunk
from api.schemas.documents import BatchUploadResponse, DocumentListResponse, DocumentResponse, ChunkListResponse, ChunkResponse, DocumentUpdateRequest, ChunkUpdateRequest


# Router for document upload, listing, update/delete, and chunk CRUD endpoints.
router = APIRouter()


@router.post("/upload/batch", response_model=BatchUploadResponse, summary="批量上传文档")
async def upload_batch_documents(
        files: List[UploadFile] = File(...),
        kb_id: Optional[int] = Query(None, description="可选：知识库ID，指定后上传至该知识库"),
        embedding_model_name: Optional[str] = Query(None, description="可选：嵌入模型名称"),
        chunk_mode: Optional[str] = Query(None, description="分块模式 paragraph/sentence/length/delimiter"),
        chunk_delimiter: Optional[str] = Query(None, description="自定义分隔符，如 \\n 或 \\n\\n"),
        chunk_size: Optional[int] = Query(None, description="按长度分块的大小"),
        chunk_overlap: Optional[int] = Query(None, description="按长度分块的重叠"),
        db= Depends(get_db)
):
    """Batch-upload .txt documents, chunk/embed them, and store the vectors.

    Non-.txt files are silently skipped; if no valid file remains a 400 is
    returned. Optional chunking settings are forwarded to the embedding
    helper as a ``{'mode': ..., 'params': ...}`` dict.

    Raises:
        HTTPException 400: no valid .txt file was uploaded.
        HTTPException 500: any processing/storage failure.
    """
    try:
        uploaded_files = []
        file_paths = []

        # Persist each valid .txt upload into UPLOAD_DIR; skip anything else.
        for file in files:
            if not file.filename.endswith('.txt'):
                continue
            file_path = os.path.join(UPLOAD_DIR, file.filename)
            with open(file_path, "wb") as buffer:
                shutil.copyfileobj(file.file, buffer)
            file_paths.append(file_path)
            uploaded_files.append(file.filename)

        if not file_paths:
            raise HTTPException(status_code=400, detail="没有有效的txt文件")

        # Build the optional chunking spec, keeping only the parameters
        # relevant to the selected mode.
        chunking = None
        if chunk_mode:
            params = {}
            if chunk_mode == 'delimiter' and chunk_delimiter is not None:
                params['delimiter'] = chunk_delimiter
            if chunk_mode == 'length':
                if chunk_size is not None:
                    params['chunk_size'] = chunk_size
                if chunk_overlap is not None:
                    params['overlap'] = chunk_overlap
            chunking = {'mode': chunk_mode, 'params': params}

        doc_ids = store_embeddings_to_db_batch(
            file_paths, db,
            kb_id=kb_id,
            embedding_model_name=embedding_model_name,
            chunking=chunking
        )

        # doc_ids is assumed to be parallel to file_paths/uploaded_files
        # (same order) — TODO confirm against store_embeddings_to_db_batch.
        results = []
        for i, doc_id in enumerate(doc_ids):
            doc_info = get_document_info(doc_id, db)
            results.append({
                "doc_id": doc_id,
                "doc_name": uploaded_files[i],
                "vector_count": doc_info['vector_count'] if doc_info else 0
            })

        return BatchUploadResponse(
            success=True,
            message=f"批量上传成功，处理了{len(results)}个文档",
            results=results,
            total_documents=len(results)
        )
    except HTTPException:
        # Bug fix: previously the 400 "no valid txt files" error raised above
        # was caught by the generic handler below and re-raised as a 500.
        raise
    except Exception as e:
        raise HTTPException(status_code=500, detail=f"批量文档处理失败: {str(e)}")


@router.get("/documents", response_model=DocumentListResponse, summary="获取文档列表")
async def get_documents(
        page: int = Query(1, ge=1, description="页码"),
        page_size: int = Query(10, ge=1, le=100, description="每页数量"),
        kb_id: Optional[int] = Query(None, description="可选：按知识库筛选"),
        db= Depends(get_db)
):
    """Paginated document listing, optionally filtered by knowledge base.

    Consistency fix: now populates the same optional DocumentResponse fields
    (embedding model name, chunk settings, enabled flag) as the per-KB
    listing endpoint, instead of omitting them.

    Raises:
        HTTPException 500: on any query failure.
    """
    try:
        offset = (page - 1) * page_size
        query = db.query(Document)
        if kb_id is not None:
            query = query.filter(Document.kb_id == kb_id)
        total_count = query.count()
        documents = query.offset(offset).limit(page_size).all()
        doc_list = []
        for doc in documents:
            # One count query per document; acceptable with page_size <= 100.
            vector_count = db.query(VectorEmbedding).filter(VectorEmbedding.doc_id == doc.id).count()
            doc_list.append(DocumentResponse(
                id=doc.id,
                doc_name=doc.doc_name,
                doc_hash=doc.doc_hash,
                created_at=doc.created_at,
                updated_at=doc.updated_at,
                vector_count=vector_count,
                embedding_model_name=getattr(doc, 'embedding_model_name', None),
                chunk_mode=getattr(doc, 'chunk_mode', None),
                chunk_params=getattr(doc, 'chunk_params', None),
                is_enabled=getattr(doc, 'is_enabled', True)
            ))
        return DocumentListResponse(
            success=True,
            documents=doc_list,
            total_count=total_count,
            page=page,
            page_size=page_size,
            # Ceiling division for the total page count.
            total_pages=(total_count + page_size - 1) // page_size
        )
    except Exception as e:
        raise HTTPException(status_code=500, detail=f"获取文档列表失败: {str(e)}")


@router.get("/documents/{doc_id}", response_model=DocumentResponse, summary="获取文档详情")
async def get_document_detail(doc_id: int, db= Depends(get_db)):
    """Fetch one document's metadata by primary key.

    Raises:
        HTTPException 404: the document does not exist.
        HTTPException 500: any other lookup failure.
    """
    try:
        info = get_document_info(doc_id, db)
        if not info:
            raise HTTPException(status_code=404, detail="文档不存在")
        # get_document_info returns a dict matching DocumentResponse's fields.
        return DocumentResponse(**info)
    except HTTPException:
        raise
    except Exception as e:
        raise HTTPException(status_code=500, detail=f"获取文档信息失败: {str(e)}")


@router.delete("/documents/{doc_id}", summary="删除文档")
async def delete_document(doc_id: int, db= Depends(get_db)):
    """Delete a document together with all of its vector embeddings.

    Bug fix: the document name is captured *before* delete/commit. With
    SQLAlchemy's default expire_on_commit=True, reading ``doc.doc_name``
    after the commit would refresh the instance against the already-deleted
    row and fail.

    Raises:
        HTTPException 404: the document does not exist.
        HTTPException 500: any deletion failure (transaction rolled back).
    """
    try:
        doc = db.query(Document).filter(Document.id == doc_id).first()
        if not doc:
            raise HTTPException(status_code=404, detail="文档不存在")
        doc_name = doc.doc_name
        vector_count = db.query(VectorEmbedding).filter(VectorEmbedding.doc_id == doc_id).count()
        # Remove dependent embedding rows first, then the document itself.
        db.query(VectorEmbedding).filter(VectorEmbedding.doc_id == doc_id).delete()
        db.delete(doc)
        db.commit()
        return {
            "success": True,
            "message": f"成功删除文档'{doc_name}'及其{vector_count}个向量数据"
        }
    except HTTPException:
        raise
    except Exception as e:
        db.rollback()
        raise HTTPException(status_code=500, detail=f"删除文档失败: {str(e)}")


@router.patch("/documents/{doc_id}", summary="更新文档信息")
async def update_document(doc_id: int, body: DocumentUpdateRequest, db=Depends(get_db)):
    """Partially update a document's mutable fields (name, enabled flag).

    Raises:
        HTTPException 404: the document does not exist.
        HTTPException 500: commit failure (transaction rolled back).
    """
    try:
        target = db.query(Document).filter(Document.id == doc_id).first()
        if target is None:
            raise HTTPException(status_code=404, detail="文档不存在")
        # Apply only the fields the client actually provided.
        if body.doc_name is not None:
            target.doc_name = body.doc_name
        if body.is_enabled is not None:
            target.is_enabled = bool(body.is_enabled)
        db.commit()
        return {"success": True}
    except HTTPException:
        raise
    except Exception as e:
        db.rollback()
        raise HTTPException(status_code=500, detail=f"更新文档失败: {str(e)}")


@router.patch("/documents/{doc_id}/chunks/{chunk_index}", summary="更新分块内容")
async def update_chunk(doc_id: int, chunk_index: int, body: ChunkUpdateRequest, db=Depends(get_db)):
    """Update a chunk's text and re-embed it with the document's model.

    Bug fix: the response previously always reported ``"reembedded": True``
    even when the embedding call failed (swallowed below) or returned an
    empty result; the flag now reflects whether a new vector was stored.

    Raises:
        HTTPException 404: the chunk does not exist.
        HTTPException 500: commit failure (transaction rolled back).
    """
    try:
        chunk = db.query(VectorEmbedding).filter(
            VectorEmbedding.doc_id == doc_id,
            VectorEmbedding.chunk_index == chunk_index
        ).first()
        if not chunk:
            raise HTTPException(status_code=404, detail="分块不存在")
        chunk.chunk_text = body.chunk_text
        # Prefer the embedding model recorded on the parent document, if any.
        doc = db.query(Document).filter(Document.id == doc_id).first()
        model_name = None
        if doc and getattr(doc, 'embedding_model_name', None):
            model_name = doc.embedding_model_name
        reembedded = False
        # Re-embedding is best-effort: on failure the old vector is kept
        # and the text update still commits.
        try:
            new_embedding = embed_chunk(body.chunk_text, model_name)
            if new_embedding:
                chunk.embedding = new_embedding
                reembedded = True
        except Exception:
            pass
        db.commit()
        return {"success": True, "reembedded": reembedded, "embedding_model": model_name}
    except HTTPException:
        raise
    except Exception as e:
        db.rollback()
        raise HTTPException(status_code=500, detail=f"更新分块失败: {str(e)}")


@router.post("/documents/{doc_id}/chunks", response_model=ChunkResponse, summary="新增分块")
async def create_chunk(doc_id: int, body: ChunkUpdateRequest, db=Depends(get_db)):
    """Append a new chunk to a document, embedding it on a best-effort basis.

    Raises:
        HTTPException 404: the document does not exist.
        HTTPException 500: commit failure (transaction rolled back).
    """
    try:
        parent = db.query(Document).filter(Document.id == doc_id).first()
        if parent is None:
            raise HTTPException(status_code=404, detail="文档不存在")
        # Next chunk_index = highest existing index + 1 (0 for the first chunk).
        last = (
            db.query(VectorEmbedding)
            .filter(VectorEmbedding.doc_id == doc_id)
            .order_by(VectorEmbedding.chunk_index.desc())
            .first()
        )
        new_index = 0 if last is None else last.chunk_index + 1
        model = getattr(parent, 'embedding_model_name', None)
        # Embedding failures are tolerated: the chunk is stored with an
        # empty vector rather than rejecting the request.
        try:
            vector = embed_chunk(body.chunk_text, model) or []
        except Exception:
            vector = []
        db.add(VectorEmbedding(
            doc_id=doc_id,
            chunk_index=new_index,
            chunk_text=body.chunk_text,
            embedding=vector
        ))
        db.commit()
        return ChunkResponse(chunk_index=new_index, chunk_text=body.chunk_text)
    except HTTPException:
        raise
    except Exception as e:
        db.rollback()
        raise HTTPException(status_code=500, detail=f"新增分块失败: {str(e)}")


@router.delete("/documents/{doc_id}/chunks/{chunk_index}", summary="删除分块")
async def delete_chunk(doc_id: int, chunk_index: int, db=Depends(get_db)):
    """Delete one chunk identified by document id + chunk index.

    Raises:
        HTTPException 404: the chunk does not exist.
        HTTPException 500: commit failure (transaction rolled back).
    """
    try:
        row = (
            db.query(VectorEmbedding)
            .filter(VectorEmbedding.doc_id == doc_id)
            .filter(VectorEmbedding.chunk_index == chunk_index)
            .first()
        )
        if row is None:
            raise HTTPException(status_code=404, detail="分块不存在")
        db.delete(row)
        db.commit()
        return {"success": True}
    except HTTPException:
        raise
    except Exception as e:
        db.rollback()
        raise HTTPException(status_code=500, detail=f"删除分块失败: {str(e)}")


@router.get("/documents/{doc_id}/chunks", response_model=ChunkListResponse, summary="获取文档分块（分页）")
async def get_document_chunks(doc_id: int, page: int = Query(1, ge=1, description="页码"), page_size: int = Query(20, ge=1, le=200, description="每页数量"), db=Depends(get_db)):
    """Return one page of a document's chunks, ordered by chunk_index.

    Raises:
        HTTPException 404: the document does not exist.
        HTTPException 500: any query failure.
    """
    try:
        if not db.query(Document).filter(Document.id == doc_id).first():
            raise HTTPException(status_code=404, detail="文档不存在")
        base = db.query(VectorEmbedding).filter(VectorEmbedding.doc_id == doc_id)
        total = base.count()
        start = (page - 1) * page_size
        page_rows = (
            base.order_by(VectorEmbedding.chunk_index.asc())
            .offset(start)
            .limit(page_size)
            .all()
        )
        return ChunkListResponse(
            success=True,
            chunks=[ChunkResponse(chunk_index=r.chunk_index, chunk_text=r.chunk_text)
                    for r in page_rows],
            total_count=total,
            page=page,
            page_size=page_size,
            # Ceiling division for the total page count.
            total_pages=(total + page_size - 1) // page_size
        )
    except HTTPException:
        raise
    except Exception as e:
        raise HTTPException(status_code=500, detail=f"获取文档分块失败: {str(e)}")


@router.get("/knowledge-bases/{kb_id}/documents", response_model=DocumentListResponse, summary="按知识库获取文档列表")
async def get_kb_documents(
    kb_id: int,
    page: int = Query(1, ge=1, description="页码"),
    page_size: int = Query(10, ge=1, le=100, description="每页数量"),
    db=Depends(get_db)
):
    """Paginated listing of the documents belonging to one knowledge base.

    Raises:
        HTTPException 404: the knowledge base does not exist.
        HTTPException 500: any query failure.
    """
    try:
        if db.query(KnowledgeBase).filter(KnowledgeBase.id == kb_id).first() is None:
            raise HTTPException(status_code=404, detail="知识库不存在")
        base = db.query(Document).filter(Document.kb_id == kb_id)
        total_count = base.count()
        start = (page - 1) * page_size
        rows = base.offset(start).limit(page_size).all()
        items = []
        for d in rows:
            # One count query per document; acceptable with page_size <= 100.
            n_vectors = db.query(VectorEmbedding).filter(VectorEmbedding.doc_id == d.id).count()
            items.append(DocumentResponse(
                id=d.id,
                doc_name=d.doc_name,
                doc_hash=d.doc_hash,
                created_at=d.created_at,
                updated_at=d.updated_at,
                vector_count=n_vectors,
                embedding_model_name=getattr(d, 'embedding_model_name', None),
                chunk_mode=getattr(d, 'chunk_mode', None),
                chunk_params=getattr(d, 'chunk_params', None),
                is_enabled=getattr(d, 'is_enabled', True)
            ))
        return DocumentListResponse(
            success=True,
            documents=items,
            total_count=total_count,
            page=page,
            page_size=page_size,
            # Ceiling division for the total page count.
            total_pages=(total_count + page_size - 1) // page_size
        )
    except HTTPException:
        raise
    except Exception as e:
        raise HTTPException(status_code=500, detail=f"获取知识库文档失败: {str(e)}")


