from fastapi import FastAPI, UploadFile, File, HTTPException, Depends
import logging
from fastapi.middleware.cors import CORSMiddleware
from pydantic import BaseModel
from typing import List, Optional, Dict, Any
import os
import uuid
from datetime import datetime
from dotenv import load_dotenv

# Logging configuration: app-level INFO, streamed to the console.
logging.basicConfig(
    level=logging.INFO,
    format="%(asctime)s - %(name)s - %(levelname)s - %(message)s",
    handlers=[logging.StreamHandler()]
)
logger = logging.getLogger(__name__)

# Quiet down chatty third-party libraries.
for _noisy_logger in ("httpx", "chromadb", "langchain"):
    logging.getLogger(_noisy_logger).setLevel(logging.WARNING)

# Load environment variables from a local .env file, if one exists.
load_dotenv()

# 导入项目模块
from src.ingestion.loaders import DocumentLoader
from src.ingestion.text_splitter import DocumentSplitter
from src.ingestion.vector_store import VectorStoreManager
from src.retrieval.retrieval import RetrievalManager
from src.generation.generator import ResponseGenerator

# FastAPI application instance; metadata feeds the generated OpenAPI docs.
app = FastAPI(
    title="RAG知识库问答系统API",
    description="基于LangChain实现的检索增强生成系统API接口",
    version="1.0.0"
)

# CORS configuration.
# NOTE(review): wildcard origins combined with allow_credentials=True is
# rejected by browsers for credentialed requests and is unsafe in production —
# restrict allow_origins to concrete domains before deploying.
app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],  # production should list specific domains
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)

# Global application components
class RAGSystem:
    """Process-wide singleton holding every component of the RAG pipeline."""

    _instance = None
    _initialized = False

    def __new__(cls):
        # Classic singleton: create the instance once, then always reuse it.
        instance = cls._instance
        if instance is None:
            instance = super().__new__(cls)
            cls._instance = instance
        return instance

    def initialize(self, **kwargs):
        """Build all pipeline components; subsequent calls are no-ops.

        Recognized keyword options: chunk_size, chunk_overlap,
        vector_store_type, persist_directory, retrieval_strategy.
        """
        if self._initialized:
            return

        # Directory where uploaded documents are stored on disk.
        self.upload_dir = os.path.join(os.path.dirname(__file__), "../../examples/docs")
        os.makedirs(self.upload_dir, exist_ok=True)

        # Document loading and chunking.
        self.document_loader = DocumentLoader()
        self.text_splitter = DocumentSplitter(
            chunk_size=kwargs.get("chunk_size", 500),
            chunk_overlap=kwargs.get("chunk_overlap", 100)
        )

        # Vector store backing the retrieval step.
        store_type = kwargs.get("vector_store_type", "chroma")
        logger.info(f"初始化向量存储管理器: {store_type}")
        self.vector_store = VectorStoreManager(
            embedding_model="ollama",
            vector_store_type=store_type,
            persist_directory=kwargs.get("persist_directory", "./vector_db")
        )
        logger.debug(f"向量存储初始化完成，持久化目录: {self.vector_store.persist_directory}")

        # Retrieval layer on top of the vector store.
        strategy = kwargs.get("retrieval_strategy", "hybrid")
        logger.info(f"初始化检索管理器，策略: {strategy}")
        self.retrieval_manager = RetrievalManager(
            vector_store_manager=self.vector_store,
            retrieval_strategy=strategy
        )

        # Answer generation.
        self.generator = ResponseGenerator()

        self._initialized = True
        logger.info("RAG系统组件初始化完成")

# The single shared RAG system instance used by all request handlers.
rag_system = RAGSystem()

# FastAPI dependency: guarantee the RAG system is ready before a handler runs.
def get_rag_system():
    """Return the shared RAG system, lazily initializing it on first use."""
    system = rag_system
    if not system._initialized:
        system.initialize()
    return system

# Pydantic models
class QueryRequest(BaseModel):
    """Request body for the /query endpoint."""
    question: str  # natural-language question to answer
    top_k: Optional[int] = 3  # number of documents to retrieve
    retrieval_strategy: Optional[str] = "hybrid"  # retrieval strategy name

class QueryResponse(BaseModel):
    """Response body for the /query endpoint."""
    answer: str  # generated answer text
    sources: List[Dict[str, Any]]  # source documents backing the answer
    query_time: float  # end-to-end processing time in seconds

class DocumentResponse(BaseModel):
    """Metadata returned for an uploaded or listed document."""
    document_id: str  # identifier of the document in the vector store
    filename: str
    upload_time: str  # ISO-8601 timestamp
    page_count: int
    status: str  # e.g. "success" or "processed"

class DeleteDocumentRequest(BaseModel):
    """Request body for deleting a single document."""
    document_id: str

class DeleteDocumentsRequest(BaseModel):
    """Request body for batch document deletion."""
    document_ids: List[str]

class DuplicateDocumentsResponse(BaseModel):
    """Response listing groups of near-duplicate documents."""
    duplicate_groups: List[List[str]]  # each inner list holds the IDs of one duplicate group
    total_groups: int

class RemoveDuplicatesResponse(BaseModel):
    """Response summarizing a de-duplication run."""
    removed_count: int  # duplicates deleted
    remaining_count: int  # documents left after deletion

class DocumentInfo(BaseModel):
    """Detailed information about one stored document."""
    id: str
    vector_id: str
    metadata: Dict[str, Any]

class DocumentsInfoResponse(BaseModel):
    """Response carrying detailed info for every stored document."""
    documents: List[DocumentInfo]
    total_count: int

# API routes
@app.on_event("startup")  # NOTE(review): on_event is deprecated in newer FastAPI — consider migrating to lifespan handlers
async def startup_event():
    """Initialize the RAG system when the application starts."""
    logger.info("应用启动，开始初始化RAG系统...")
    rag_system.initialize()
    logger.info("RAG系统初始化完成，应用启动成功")

@app.post("/upload", response_model=DocumentResponse)
async def upload_document(
    file: UploadFile = File(...),
    rag: RAGSystem = Depends(get_rag_system)
):
    """Upload a document and index it into the knowledge base.

    The file is saved into the upload directory, loaded, split into chunks,
    and the chunks are added to the vector store.

    Raises:
        HTTPException 400: invalid filename or unloadable/empty document.
        HTTPException 500: any unexpected processing failure.
    """
    try:
        # Security fix: keep only the base name so a crafted filename such as
        # "../../etc/passwd" cannot escape the upload directory (path traversal).
        safe_name = os.path.basename(file.filename or "")
        if not safe_name:
            raise HTTPException(status_code=400, detail="无效的文件名")

        logger.info(f"开始处理文件上传: {file.filename}")
        file_path = os.path.join(rag.upload_dir, safe_name)
        logger.debug(f"文件保存路径: {file_path}")
        with open(file_path, "wb") as f:
            f.write(await file.read())
        logger.debug(f"文件保存成功，大小: {os.path.getsize(file_path)} bytes")

        # Load the document into page-level records.
        logger.info(f"开始加载文档: {file_path}")
        documents = rag.document_loader.load_single_document(file_path)
        if not documents:
            logger.warning(f"文档加载失败，内容为空: {file_path}")
            raise HTTPException(status_code=400, detail="无法加载文档内容")
        logger.info(f"文档加载完成，页数: {len(documents)}")

        # Split into chunks sized for embedding.
        logger.info("开始分割文档")
        chunks = rag.text_splitter.split_documents(documents)
        logger.info(f"文档分割完成，生成 {len(chunks)} 个文本块")

        # Index the chunks in the vector store.
        logger.info("开始将文档块添加到向量存储")
        doc_ids = rag.vector_store.add_documents(chunks)
        logger.info(f"文档成功添加到向量存储，文档ID列表: {doc_ids}")

        return DocumentResponse(
            document_id=doc_ids[0] if doc_ids else str(uuid.uuid4()),
            filename=safe_name,  # report the name actually stored on disk
            upload_time=datetime.now().isoformat(),
            page_count=len(documents),
            status="success"
        )
    except HTTPException:
        # Bug fix: the deliberate 4xx responses raised above used to be caught
        # by the generic handler below and converted into 500s.
        raise
    except Exception as e:
        logger.error(f"上传文档失败: {str(e)}", exc_info=True)
        raise HTTPException(status_code=500, detail=f"上传文档失败: {str(e)}")

@app.post("/query", response_model=QueryResponse)
async def query_knowledge_base(
    request: QueryRequest,
    rag: RAGSystem = Depends(get_rag_system)
):
    """Answer a question via retrieval-augmented generation."""
    try:
        started_at = datetime.now()
        logger.info(f"收到查询请求: {request.question}, top_k={request.top_k}, strategy={request.retrieval_strategy}")

        # Honor the strategy requested by the client.
        # NOTE(review): switch_strategy mutates shared state, so concurrent
        # requests asking for different strategies may interfere — confirm
        # whether that is acceptable for this deployment.
        if request.retrieval_strategy == rag.retrieval_manager.retrieval_strategy:
            logger.debug(f"使用当前检索策略: {request.retrieval_strategy}")
        else:
            logger.info(f"切换检索策略: {rag.retrieval_manager.retrieval_strategy} -> {request.retrieval_strategy}")
            rag.retrieval_manager.switch_strategy(request.retrieval_strategy)

        # Fetch the documents most relevant to the question.
        logger.info(f"开始检索相关文档，top_k={request.top_k}")
        documents = rag.retrieval_manager.retrieve(
            query=request.question,
            top_k=request.top_k
        )
        logger.info(f"检索完成，找到 {len(documents)} 个相关文档")

        # Generate the answer from the retrieved context.
        logger.info("开始生成回答")
        result = rag.generator.generate(
            question=request.question,
            documents=documents
        )
        logger.debug(f"回答生成完成，长度: {len(result['answer'])} 字符")

        elapsed = (datetime.now() - started_at).total_seconds()
        return QueryResponse(
            answer=result["answer"],
            sources=result["sources"],
            query_time=elapsed
        )
    except Exception as e:
        logger.error(f"查询处理失败: {str(e)}", exc_info=True)
        raise HTTPException(status_code=500, detail=f"查询处理失败: {str(e)}")

@app.get("/documents", response_model=List[DocumentResponse])
async def list_documents(
    rag: RAGSystem = Depends(get_rag_system)
):
    """List documents found in the upload directory.

    Simplified implementation: scans the directory on every call; a real
    deployment should read document metadata from a database instead.
    """
    try:
        logger.info("开始获取文档列表")
        docs = []
        file_count = 0
        for filename in os.listdir(rag.upload_dir):
            file_count += 1
            if not filename.endswith((".pdf", ".docx", ".txt", ".md")):
                continue
            file_path = os.path.join(rag.upload_dir, filename)
            # Load the document only to determine its page count.
            try:
                document = rag.document_loader.load_single_document(file_path)
                page_count = len(document)
            except Exception as e:
                # Bug fix: was a bare `except:` that silently swallowed every
                # exception (including KeyboardInterrupt); narrowed and logged.
                logger.warning(f"无法加载文档以统计页数: {file_path} ({e})")
                page_count = 0

            docs.append(DocumentResponse(
                # Deterministic ID derived from the file path.
                document_id=str(uuid.uuid5(uuid.NAMESPACE_URL, file_path)),
                filename=filename,
                upload_time=datetime.fromtimestamp(os.path.getctime(file_path)).isoformat(),
                page_count=page_count,
                status="processed"
            ))
        logger.info(f"文档列表获取完成，共找到 {len(docs)} 个有效文档（总文件数: {file_count}）")
        return docs
    except Exception as e:
        logger.error(f"获取文档列表失败: {str(e)}", exc_info=True)
        raise HTTPException(status_code=500, detail=f"获取文档列表失败: {str(e)}")

@app.get("/health")
async def health_check():
    """Liveness probe: report service identity and status."""
    logger.debug("健康检查请求收到")
    payload = {"status": "healthy", "service": "rag-api", "version": "1.0.0"}
    return payload

# Document-management API routes
@app.delete("/documents/{document_id}", response_model=Dict[str, Any])
async def delete_document(
    document_id: str,
    rag: RAGSystem = Depends(get_rag_system)
):
    """Delete the document with the given ID from the vector store.

    Raises:
        HTTPException 404: the document does not exist or could not be deleted.
        HTTPException 500: unexpected failure while deleting.
    """
    try:
        logger.info(f"开始删除文档，ID: {document_id}")
        result = rag.vector_store.delete_document(document_id)
        if result:
            logger.info(f"文档删除成功，ID: {document_id}")
            return {"status": "success", "message": "文档删除成功", "document_id": document_id}
        logger.warning(f"文档不存在或删除失败，ID: {document_id}")
        raise HTTPException(status_code=404, detail=f"文档不存在或删除失败，ID: {document_id}")
    except HTTPException:
        # Bug fix: the 404 raised above was previously caught by the generic
        # handler below and returned to clients as a 500.
        raise
    except Exception as e:
        logger.error(f"删除文档失败: {str(e)}", exc_info=True)
        raise HTTPException(status_code=500, detail=f"删除文档失败: {str(e)}")

@app.post("/documents/delete", response_model=Dict[str, Any])
async def delete_documents(
    request: DeleteDocumentsRequest,
    rag: RAGSystem = Depends(get_rag_system)
):
    """Delete a batch of documents by ID."""
    try:
        requested = len(request.document_ids)
        logger.info(f"开始批量删除文档，共 {requested} 个ID")
        deleted_count = rag.vector_store.delete_documents(request.document_ids)
        logger.info(f"批量删除完成，成功删除 {deleted_count} 个文档")
        response = {
            "status": "success",
            "message": f"成功删除 {deleted_count} 个文档",
            "deleted_count": deleted_count,
            "total_ids": requested,
        }
        return response
    except Exception as e:
        logger.error(f"批量删除文档失败: {str(e)}", exc_info=True)
        raise HTTPException(status_code=500, detail=f"批量删除文档失败: {str(e)}")

@app.get("/documents/ids", response_model=List[str])
async def list_document_ids(
    rag: RAGSystem = Depends(get_rag_system)
):
    """Return the ID of every document in the vector store."""
    try:
        logger.info("开始获取所有文档ID")
        ids = rag.vector_store.list_document_ids()
        logger.info(f"获取成功，共 {len(ids)} 个文档ID")
        return ids
    except Exception as e:
        logger.error(f"获取文档ID列表失败: {str(e)}", exc_info=True)
        raise HTTPException(status_code=500, detail=f"获取文档ID列表失败: {str(e)}")

@app.get("/documents/info", response_model=DocumentsInfoResponse)
async def list_documents_info(
    rag: RAGSystem = Depends(get_rag_system)
):
    """Return detailed information for every stored document."""
    try:
        logger.info("开始获取所有文档详细信息")
        stored = rag.vector_store.list_documents()
        logger.info(f"获取成功，共 {len(stored)} 个文档")
        return DocumentsInfoResponse(documents=stored, total_count=len(stored))
    except Exception as e:
        logger.error(f"获取文档详细信息失败: {str(e)}", exc_info=True)
        raise HTTPException(status_code=500, detail=f"获取文档详细信息失败: {str(e)}")

@app.get("/documents/duplicates", response_model=DuplicateDocumentsResponse)
async def find_duplicate_documents(
    similarity_threshold: float = 0.95,
    rag: RAGSystem = Depends(get_rag_system)
):
    """Find groups of near-duplicate documents above the similarity threshold."""
    try:
        logger.info(f"开始查找重复文档，相似度阈值: {similarity_threshold}")
        groups = rag.vector_store.find_duplicate_documents(similarity_threshold)
        logger.info(f"查找完成，找到 {len(groups)} 组重复文档")
        return DuplicateDocumentsResponse(
            duplicate_groups=groups,
            total_groups=len(groups),
        )
    except Exception as e:
        logger.error(f"查找重复文档失败: {str(e)}", exc_info=True)
        raise HTTPException(status_code=500, detail=f"查找重复文档失败: {str(e)}")

@app.post("/documents/remove-duplicates", response_model=RemoveDuplicatesResponse)
async def remove_duplicates(
    similarity_threshold: float = 0.95,
    rag: RAGSystem = Depends(get_rag_system)
):
    """Delete near-duplicate documents above the similarity threshold."""
    try:
        logger.info(f"开始删除重复文档，相似度阈值: {similarity_threshold}")
        removed, remaining = rag.vector_store.remove_duplicates(similarity_threshold)
        logger.info(f"删除完成，移除 {removed} 个重复文档，剩余 {remaining} 个文档")
        return RemoveDuplicatesResponse(
            removed_count=removed,
            remaining_count=remaining,
        )
    except Exception as e:
        logger.error(f"删除重复文档失败: {str(e)}", exc_info=True)
        raise HTTPException(status_code=500, detail=f"删除重复文档失败: {str(e)}")

if __name__ == "__main__":
    import uvicorn
    # Development server; reload=True is for local development only.
    # NOTE(review): "main:app" assumes this module is importable as `main` — confirm the filename.
    uvicorn.run("main:app", host="0.0.0.0", port=8000, reload=True)