"""文档管理路由"""
from datetime import datetime
from typing import Annotated
from fastapi import APIRouter, UploadFile, File, HTTPException, Depends
from app.models.schemas import (
    DocumentUploadResponse,
    DocumentListResponse,
    DocumentDeleteResponse,
    DocumentInfo
)
from app.services.embedding import EmbeddingService
from app.services.storage import StorageService
from app.utils.text_processing import TextProcessor
from app.core.dependencies import get_embedding_service, get_storage_service
from app.core.config import settings

# Shared router: every endpoint in this module is mounted under /api/documents
router = APIRouter(prefix="/api/documents", tags=["documents"])


@router.post("/upload", response_model=DocumentUploadResponse)
async def upload_document(
    file: UploadFile = File(...),
    embedding_service: EmbeddingService = Depends(get_embedding_service),
    storage_service: StorageService = Depends(get_storage_service)
):
    """
    Upload and process a document.

    - Accepts an uploaded UTF-8 text (.txt) file
    - Saves the original file via the storage service
    - Splits the text into fixed-size chunks
    - Generates an embedding vector for each chunk in one batch call
    - Appends the chunk/vector records and document metadata to the
      unified JSON store

    Raises:
        HTTPException 400: unsupported file type, file too large, or
            content is not valid UTF-8.
        HTTPException 500: any unexpected processing/storage failure.
    """
    # Validate file type. `filename` may be None for some clients, so guard
    # before calling endswith() to avoid an AttributeError -> raw 500.
    if not file.filename or not file.filename.endswith(".txt"):
        raise HTTPException(status_code=400, detail="仅支持 txt 格式文件")

    # Validate file size (read the whole body once; reused for decoding below).
    content = await file.read()
    if len(content) > settings.MAX_FILE_SIZE:
        raise HTTPException(status_code=400, detail=f"文件大小超过限制（{settings.MAX_FILE_SIZE / 1024 / 1024}MB）")

    # Decode up front so a bad encoding surfaces as a client error (400)
    # instead of being swallowed by the generic 500 handler below.
    try:
        text_content = content.decode("utf-8")
    except UnicodeDecodeError:
        raise HTTPException(status_code=400, detail="文件编码错误，仅支持 UTF-8 编码")

    # Rewind so the storage service can re-read the stream from the start.
    await file.seek(0)

    try:
        # Document ID derived from the upload timestamp.
        # NOTE(review): second-resolution IDs can collide for concurrent
        # uploads — consider appending a random suffix if that matters here.
        timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
        document_id = f"doc_{timestamp}"

        # Persist the original uploaded file.
        file_path = await storage_service.save_uploaded_file(file, document_id)

        # Split the text into fixed-size chunks.
        text_processor = TextProcessor(chunk_size=settings.CHUNK_SIZE)
        chunks = text_processor.chunk_text(text_content)

        # Generate embeddings for all chunks in a single batch call.
        chunk_texts = [chunk["content"] for chunk in chunks]
        embeddings = await embedding_service.generate_embeddings_batch(chunk_texts)

        # Build the flat per-chunk records (one embedding per chunk).
        chunk_data_list = []
        for i, (chunk, embedding) in enumerate(zip(chunks, embeddings)):
            chunk_data = {
                "chunk_id": f"{document_id}_chunk_{i}",
                "document_id": document_id,
                "filename": file.filename,  # denormalized for chunk-level lookups
                "index": i,
                "content": chunk["content"],
                "token_count": chunk["token_count"],
                "embedding": embedding
            }
            chunk_data_list.append(chunk_data)

        # Document-level metadata (chunks are stored separately above).
        document_metadata = {
            "document_id": document_id,
            "filename": file.filename,
            "upload_time": datetime.now().isoformat(),
            "file_path": file_path,
            "chunk_size": settings.CHUNK_SIZE,
            "chunks_count": len(chunks)
        }

        # Flattened payload: chunk records and metadata kept side by side.
        document_data = {
            "chunks": chunk_data_list,
            "document_metadata": document_metadata
        }

        # Persist everything to the unified JSON store.
        await storage_service.add_document(document_data)

        return DocumentUploadResponse(
            document_id=document_id,
            filename=file.filename,
            chunks_count=len(chunks)
        )

    except HTTPException:
        # Pass HTTP errors through unchanged (consistent with delete_document).
        raise
    except Exception as e:
        raise HTTPException(status_code=500, detail=f"文档处理失败: {str(e)}")


@router.get("/", response_model=DocumentListResponse)
async def list_documents(
    storage_service: StorageService = Depends(get_storage_service)
):
    """
    List all uploaded documents.

    Returns the basic metadata for every document currently in the store.
    """
    try:
        raw_entries = await storage_service.list_documents()
        return DocumentListResponse(
            documents=[DocumentInfo(**entry) for entry in raw_entries]
        )
    except Exception as e:
        raise HTTPException(status_code=500, detail=f"获取文档列表失败: {str(e)}")


@router.delete("/{document_id}", response_model=DocumentDeleteResponse)
async def delete_document(
    document_id: str,
    storage_service: StorageService = Depends(get_storage_service)
):
    """
    Delete a document.

    Removes the document identified by ``document_id`` together with its
    stored vector data. Responds 404 when no such document exists.
    """
    try:
        deleted = await storage_service.delete_document(document_id)
    except HTTPException:
        raise
    except Exception as e:
        raise HTTPException(status_code=500, detail=f"删除文档失败: {str(e)}")

    # Storage reports False when nothing matched the given ID.
    if not deleted:
        raise HTTPException(status_code=404, detail="文档不存在")

    return DocumentDeleteResponse()

