"""RAG（检索增强生成）相关路由"""

import logging
import asyncio
import os
import uuid

from fastapi import APIRouter, HTTPException, Depends, UploadFile, File, Form, BackgroundTasks
from pydantic import BaseModel
from typing import Dict, Any, List, Optional, Union

from backend.common.config import config_manager
from backend.rag.retriever import RAGManager
from backend.logger_setup import get_logger_with_trace_id
from backend.middleware.trace_middleware import get_trace_id
from backend.common.status_manager import status_manager

# Router for all RAG endpoints; the URL prefix is versioned from project
# config (e.g. "/<api-version>/rag" — actual value comes from config_manager).
router = APIRouter(
    prefix=f"/{config_manager.api.version}/rag",
    tags=["RAG"],
    responses={404: {"description": "未找到"}},
)

# Module-level logger; handlers/formatting are configured elsewhere.
logger = logging.getLogger('api.rag')

# ---- Request models ----
class QueryRequest(BaseModel):
    """Body for POST /query: a retrieval request against the vector store.

    NOTE(review): `use_rerank` and `metadata_filters` are declared here but
    the /query handler does not forward them to the retriever — confirm
    whether that is intentional.
    """
    query: str  # free-text search query
    top_k: int = 5  # maximum number of chunks to return
    score_threshold: float = 0.3  # minimum similarity score to keep a hit
    use_rerank: bool = True  # currently unused by the handler (see NOTE)
    metadata_filters: Optional[Dict[str, Any]] = None  # currently unused by the handler (see NOTE)

class DocumentMetadata(BaseModel):
    """Descriptive metadata attached to an indexed document.

    NOTE(review): not referenced by any endpoint in this module — presumably
    used by callers elsewhere; verify before removing.
    """
    title: str  # human-readable document title
    source: str  # origin of the document (e.g. filename or URL)
    category: Optional[str] = None  # optional classification bucket
    tags: Optional[List[str]] = None  # optional free-form labels
    custom_metadata: Optional[Dict[str, Any]] = None  # arbitrary extra fields

class IndexDocumentRequest(BaseModel):
    """Body for a bulk-index request: raw document dicts plus a target collection.

    NOTE(review): not referenced by any endpoint in this module — presumably
    used by callers elsewhere; verify before removing.
    """
    documents: List[Dict[str, Any]]  # raw document payloads to index
    collection_name: Optional[str] = None  # target collection; None = default


@router.post("/query")
async def rag_query(
    request: QueryRequest,
    trace_id: str = Depends(get_trace_id)
):
    """Run a RAG retrieval and return the chunks most relevant to the query.

    NOTE(review): `request.use_rerank` and `request.metadata_filters` are
    accepted in the model but not forwarded to the retriever — confirm
    whether that is intentional.
    """
    log = get_logger_with_trace_id(logger, trace_id)
    log.info(f"接收到RAG查询请求: query={request.query[:50]}..., top_k={request.top_k}")

    try:
        rag = RAGManager()

        # Vector search against the store, thresholded by similarity score.
        hits = rag.retrieve_relevant_docs(
            query=request.query,
            top_k=request.top_k,
            score_threshold=request.score_threshold
        )

        # Flatten each hit into the response shape clients expect.
        formatted = []
        for hit in hits:
            meta = hit["metadata"]
            formatted.append({
                "id": hit["id"],
                "content": hit["content"],
                "score": hit["score"],
                "metadata": meta,
                "source": meta.get("source", "unknown"),
                "chunk_id": meta.get("chunk_id", "unknown"),
            })

        return {
            "query": request.query,
            "results": formatted,
            "total": len(formatted),
            "trace_id": trace_id
        }
    except Exception as e:
        log.error(f"RAG查询失败: {str(e)}")
        raise HTTPException(status_code=500, detail=f"RAG query failed: {str(e)}")


@router.post("/upload")
async def upload_document(
    background_tasks: BackgroundTasks,
    file: UploadFile = File(...),
    title: str = Form(...),
    category: Optional[str] = Form(None),
    tags: Optional[str] = Form(None),
    collection_name: Optional[str] = Form(None),
    trace_id: str = Depends(get_trace_id)
):
    """Accept a document upload and schedule background indexing.

    The file is validated by extension, persisted to the configured temp
    directory under a fresh task id, and a background task is queued to
    chunk/index it. Returns the task id plus a status-polling URL.

    Fixes vs. previous revision:
    - `background_tasks` is now declared with no default so FastAPI injects
      the request-scoped instance (a `BackgroundTasks()` default risks an
      uninjected object whose queued tasks never run). FastAPI resolves
      endpoint params by name/annotation, so the HTTP interface is unchanged.
    - `file.filename` may be None for some clients; guarded before splitext.
    - Tag segments are trimmed and blanks dropped.
    - `asyncio.get_running_loop()` replaces the deprecated
      `asyncio.get_event_loop()` pattern (we are inside a coroutine).

    Raises:
        HTTPException 400: unsupported file extension.
        HTTPException 500: any unexpected failure while saving/queueing.
    """
    log = get_logger_with_trace_id(logger, trace_id)
    log.info(f"接收到文档上传请求: filename={file.filename}, title={title}")

    try:
        # Whitelist of extensions the downstream parser is expected to handle.
        allowed_extensions = ['.pdf', '.docx', '.xlsx', '.md', '.txt']
        file_ext = os.path.splitext(file.filename or "")[1].lower()

        if file_ext not in allowed_extensions:
            raise HTTPException(
                status_code=400,
                detail=f"不支持的文件类型: {file_ext}。支持的类型: {', '.join(allowed_extensions)}"
            )

        # Task id doubles as the temp-file stem so concurrent uploads never collide.
        task_id = str(uuid.uuid4())

        # Comma-separated tag string -> list; trim whitespace, drop blanks.
        tags_list = [t.strip() for t in tags.split(',') if t.strip()] if tags else []

        temp_file_path = os.path.join(
            config_manager.rag.temp_dir,
            f"{task_id}{file_ext}"
        )

        # Ensure the temp directory exists before writing.
        os.makedirs(config_manager.rag.temp_dir, exist_ok=True)

        with open(temp_file_path, "wb") as f:
            f.write(await file.read())

        upload_metadata = {
            "task_id": task_id,
            "filename": file.filename,
            "title": title,
            "category": category,
            "tags": tags_list,
            "file_size": os.path.getsize(temp_file_path),
            # Monotonic loop clock — suitable for durations, not wall-clock time.
            "upload_time": asyncio.get_running_loop().time(),
            "status": "uploaded",
            "trace_id": trace_id
        }

        # Register the task so /upload/status/{task_id} can report progress.
        status_manager.add_upload_status(task_id, upload_metadata)

        # Queue the heavy chunk/index work to run after the response is sent.
        background_tasks.add_task(
            process_document_in_background,
            task_id, temp_file_path, upload_metadata, collection_name
        )

        return {
            "task_id": task_id,
            "message": "文档上传成功，正在处理中",
            "status_url": f"/{config_manager.api.version}/rag/upload/status/{task_id}",
            "trace_id": trace_id
        }
    except HTTPException:
        # Propagate HTTP errors (e.g. the 400 above) unchanged.
        raise
    except Exception as e:
        log.error(f"文档上传失败: {str(e)}")
        raise HTTPException(status_code=500, detail=f"Failed to upload document: {str(e)}")


@router.get("/upload/status/{task_id}")
async def get_upload_status(
    task_id: str,
    trace_id: str = Depends(get_trace_id)
):
    """Return the upload/processing status record for the given task id."""
    log = get_logger_with_trace_id(logger, trace_id)
    log.info(f"获取上传状态: task_id={task_id}")

    try:
        record = status_manager.get_upload_status(task_id)
        if record:
            # Tag the record with the current trace id for correlation.
            record["trace_id"] = trace_id
            return record
        raise HTTPException(status_code=404, detail=f"Task {task_id} not found")
    except HTTPException:
        # Propagate the 404 (and any other HTTP error) unchanged.
        raise
    except Exception as e:
        log.error(f"获取上传状态失败: {str(e)}")
        raise HTTPException(status_code=500, detail=f"Failed to get upload status: {str(e)}")


@router.delete("/documents/{document_id}")
async def delete_document(
    document_id: str,
    collection_name: Optional[str] = None,
    trace_id: str = Depends(get_trace_id)
):
    """Delete a document from the store — not implemented yet; always 501.

    The previous revision wrapped the unconditional raise in
    try/except blocks that were dead code: `except HTTPException: raise`
    was a no-op and the generic `except Exception` branch was unreachable.
    Observable behavior (HTTP 501) is unchanged.
    """
    log = get_logger_with_trace_id(logger, trace_id)
    log.info(f"删除文档: document_id={document_id}")

    raise HTTPException(
        status_code=501,
        detail="文档删除功能尚未实现"
    )


@router.get("/collections")
async def list_collections(
    trace_id: str = Depends(get_trace_id)
):
    """List document collections. Currently a stub that returns an empty list."""
    log = get_logger_with_trace_id(logger, trace_id)
    log.info("列出所有文档集合")

    try:
        # Instantiated for parity with the other endpoints; the stub below
        # does not query it yet (kept because construction may have side effects).
        retriever = RAGManager()

        collections: List[Any] = []

        return {
            "collections": collections,
            "total": len(collections),
            "trace_id": trace_id
        }
    except Exception as e:
        log.error(f"获取集合列表失败: {str(e)}")
        raise HTTPException(status_code=500, detail=f"Failed to list collections: {str(e)}")


@router.get("/documents")
async def list_documents(
    collection_name: Optional[str] = None,
    category: Optional[str] = None,
    tag: Optional[str] = None,
    limit: int = 50,
    offset: int = 0,
    trace_id: str = Depends(get_trace_id)
):
    """List documents with optional filters and pagination. Currently a stub."""
    log = get_logger_with_trace_id(logger, trace_id)
    log.info(f"列出文档: collection={collection_name}, category={category}, limit={limit}, offset={offset}")

    try:
        # Kept for parity with the other endpoints; the stub below does not
        # use it yet (construction may have side effects).
        retriever = RAGManager()

        documents: List[Any] = []
        total = 0

        return {
            "documents": documents,
            "total": total,
            "limit": limit,
            "offset": offset,
            "trace_id": trace_id
        }
    except Exception as e:
        log.error(f"列出文档失败: {str(e)}")
        raise HTTPException(status_code=500, detail=f"Failed to list documents: {str(e)}")


async def process_document_in_background(
    task_id: str,
    file_path: str,
    metadata: Dict[str, Any],
    collection_name: Optional[str]
):
    """Background job: chunk and index one uploaded document.

    Drives the status manager through processing -> completed/failed and
    always removes the temp file, even on error.

    Fix vs. previous revision: `asyncio.get_running_loop()` replaces the
    deprecated `asyncio.get_event_loop()` pattern at all three timestamp
    sites (this is a coroutine, so a running loop is guaranteed).

    NOTE(review): when indexing raises, the inner handler records
    {"success": False, ...} but the outer status is still set to
    "completed" and the success log line fires — confirm that clients
    inspect result["success"] rather than only "status". Left unchanged
    here because status values may be part of the client contract.

    Args:
        task_id: id assigned at upload time; key into the status manager.
        file_path: temp file written by the upload endpoint; deleted on exit.
        metadata: upload metadata dict (title/category/tags/filename/...).
        collection_name: target collection; currently unused here — TODO confirm.
    """
    log = logging.getLogger('rag.processor')

    try:
        # Mark the job as started. loop.time() is the monotonic loop clock,
        # suitable for durations rather than wall-clock timestamps.
        status_manager.update_upload_status(task_id, {
            "status": "processing",
            "progress": 0,
            "start_time": asyncio.get_running_loop().time()
        })

        rag_manager = RAGManager()

        try:
            # Chunk and index the file; returns the number of chunks stored.
            total_chunks = rag_manager.add_document_to_vector_db(
                file_path=file_path,
                metadata={
                    "title": metadata.get("title"),
                    "category": metadata.get("category"),
                    "tags": metadata.get("tags"),
                    "source": metadata.get("filename")
                }
            )

            result = {
                "success": True,
                "total_chunks": total_chunks,
                "message": "文档处理成功"
            }
        except Exception as e:
            # Indexing failure is captured in the result payload instead of
            # propagating, so the status record still reaches "completed"
            # (see NOTE in the docstring).
            result = {
                "success": False,
                "error": str(e),
                "message": "文档处理失败"
            }

        status_manager.update_upload_status(task_id, {
            "status": "completed",
            "progress": 100,
            "end_time": asyncio.get_running_loop().time(),
            "result": result
        })

        log.info(f"文档处理完成: task_id={task_id}, file={metadata['filename']}")
    except Exception as e:
        error_message = str(e)
        log.error(f"文档处理失败: task_id={task_id}, error={error_message}")

        status_manager.update_upload_status(task_id, {
            "status": "failed",
            "error": error_message,
            "end_time": asyncio.get_running_loop().time()
        })
    finally:
        # Best-effort temp-file cleanup; never let cleanup mask the job outcome.
        try:
            if os.path.exists(file_path):
                os.remove(file_path)
        except Exception as cleanup_error:
            log.error(f"清理临时文件失败: {str(cleanup_error)}")