import logging
from typing import List, Dict, Any, Optional
from langchain.schema import Document
from .document_processor import DocumentProcessor
from .reranker import Reranker
from models.llm_models import RAGModel
from config import Config
from config.thresholds import RAGThresholds
from .chroma_manager import chroma_manager
from monitoring import trace_function, trace_rag_pipeline

logger = logging.getLogger(__name__)

class RAGSystem:
    """RAG system: integrates document processing and question answering.

    Wires together a document processor (ingestion + vector search), an
    optional reranker, and an LLM answer generator, exposing a simple
    upload / ask / collection-management API. All public methods catch
    their own exceptions and return status dicts instead of raising.
    """

    def __init__(self, vector_db_path: Optional[str] = None, use_reranker: bool = True):
        """Initialize pipeline components.

        Args:
            vector_db_path: Optional path to the vector database, forwarded to
                the document processor (which chooses its own default on None).
            use_reranker: When True, construct a Reranker and apply it to
                retrieval results before answer generation.
        """
        self.document_processor = DocumentProcessor(vector_db_path)
        self.rag_model = RAGModel()
        self.use_reranker = use_reranker
        self.reranker = Reranker() if use_reranker else None
        # Lazily-populated handle; the module-level chroma_manager import is
        # what guarantees a single shared ChromaDB manager for the process.
        self._vector_store = None

    def upload_document(self, file_path: str, collection_name: str = "default") -> Dict[str, Any]:
        """Ingest a document into the given collection.

        Args:
            file_path: Path of the file to process.
            collection_name: Target vector-store collection.

        Returns:
            On success, a dict with ``status="success"`` and the collection
            info; on failure, ``status="error"`` with a message (never raises).
        """
        try:
            logger.info(f"开始上传文档: {file_path}")

            # Chunk, embed, and store the document. The returned vector-store
            # handle is not needed here, so it is intentionally discarded.
            self.document_processor.process_document(file_path, collection_name)

            collection_info = self.document_processor.get_collection_info(collection_name)

            result = {
                "status": "success",
                "message": f"文档 {file_path} 上传成功",
                "collection_info": collection_info,
                "file_path": file_path
            }

            logger.info(f"文档上传完成: {file_path}")
            return result

        except Exception as e:
            logger.error(f"文档上传失败: {str(e)}")
            return {
                "status": "error",
                "message": f"文档上传失败: {str(e)}",
                "file_path": file_path
            }

    @trace_function(name="rag_ask_question")
    def ask_question(self, question: str, collection_name: str = "default", top_k: int = None, rerank_threshold: float = RAGThresholds.DEFAULT_RERANK_THRESHOLD, **kwargs) -> Dict[str, Any]:
        """Answer a question with retrieval-augmented generation.

        Pipeline: retrieve an enlarged candidate set, optionally rerank it,
        build a context string, generate an answer, and trace the run.

        Args:
            question: The user's question.
            collection_name: Vector-store collection to search.
            top_k: Number of documents to use; defaults to
                ``RAGThresholds.DEFAULT_TOP_K`` when None.
            rerank_threshold: Minimum rerank score a document must reach.
            **kwargs: Optional tracing metadata (``user_id``, ``session_id``).

        Returns:
            A result dict whose ``status`` is ``"success"``, ``"warning"``
            (no documents found), or ``"error"`` (never raises).
        """
        # BUG FIX: the original referenced `kwargs` without declaring
        # **kwargs, so the unguarded kwargs.get(...) calls raised NameError
        # at runtime (including inside the except handler, masking the real
        # error). Tracing metadata is now extracted once, before the try, so
        # every path — including the error path — can use it.
        user_id = kwargs.get("user_id")
        session_id = kwargs.get("session_id")

        try:
            logger.info(f"🔍 RAG问答开始 - user_id: {user_id}, session_id: {session_id}")
            logger.info(f"📝 收到问题: {question}")
            logger.info(f"📚 知识库: {collection_name}, top_k: {top_k}, rerank_threshold: {rerank_threshold}")

            # 1. Retrieve more candidates than requested so the reranker has
            # material to filter down from.
            effective_top_k = top_k or RAGThresholds.DEFAULT_TOP_K
            initial_top_k = effective_top_k * RAGThresholds.INITIAL_RETRIEVAL_MULTIPLIER
            logger.info(f"🔍 开始检索文档，初始top_k: {initial_top_k}")

            relevant_docs_with_scores = self.document_processor.search_similar_with_score(question, collection_name, initial_top_k)

            # Split (document, score) pairs into parallel lists.
            relevant_docs = [doc for doc, score in relevant_docs_with_scores]
            doc_scores = [score for doc, score in relevant_docs_with_scores]

            # Log a short preview of the top matches for debugging.
            logger.info(f"📄 检索到 {len(relevant_docs)} 个相关文档:")
            for i, (doc, score) in enumerate(relevant_docs_with_scores[:3]):  # preview first 3 only
                logger.info(f"  文档 {i+1} (相似度: {score:.3f}):")
                logger.info(f"    来源: {doc.metadata.get('source', '未知')}")
                logger.info(f"    内容预览: {doc.page_content[:200]}...")
            if len(relevant_docs) > 3:
                logger.info(f"  ... 还有 {len(relevant_docs) - 3} 个文档")

            if not relevant_docs:
                result = {
                    "status": "warning",
                    "message": "未找到相关文档",
                    "question": question,
                    "answer": "抱歉，我在知识库中没有找到与您问题相关的信息。",
                    "sources": []
                }
                # Trace the empty-result case as well.
                trace_rag_pipeline(question, [], result["answer"], {"collection": collection_name, "status": "no_docs"},
                                 user_id=user_id, session_id=session_id)
                return result

            # 2. Rerank the candidate set (when enabled).
            if self.use_reranker and self.reranker:
                logger.info("开始重排序...")
                rerank_result = self.reranker.rerank_with_metadata(
                    query=question,
                    documents=relevant_docs,
                    # CONSISTENCY FIX: use the shared default instead of the
                    # hard-coded 5 so the rerank and no-rerank paths agree on
                    # the result size.
                    top_k=effective_top_k,
                    threshold=rerank_threshold
                )
                relevant_docs = rerank_result["reranked_documents"]
                # Prefer the reranker's real score; fall back to a neutral
                # default when a document carries none.
                doc_scores = [doc.metadata.get("rerank_score", 0.8) for doc in relevant_docs]
                rerank_stats = {
                    "rerank_enabled": True,
                    "filtered_count": rerank_result["filtered_count"],
                    "total_count": rerank_result["total_count"],
                    "average_score": rerank_result["average_score"],
                    "max_score": rerank_result["max_score"],
                    "min_score": rerank_result["min_score"]
                }
            else:
                # No reranking: keep the first top_k hits in retrieval order.
                relevant_docs = relevant_docs[:effective_top_k]
                doc_scores = doc_scores[:len(relevant_docs)] if doc_scores else [0.8] * len(relevant_docs)
                rerank_stats = {"rerank_enabled": False}

            # 3. Build the prompt context from the selected documents.
            context = self._build_context(relevant_docs)
            logger.info(f"📋 构建的上下文长度: {len(context)} 字符")
            logger.info(f"📋 上下文预览: {context[:500]}...")

            # 4. Generate the answer with the LLM.
            logger.info(f"🤖 开始生成回答，传递参数 - user_id: {user_id}, session_id: {session_id}")
            answer = self.rag_model.generate_response(question, context, user_id=user_id, session_id=session_id)
            logger.info(f"🤖 生成的回答: {answer}")

            # 5. Attach source attribution for the caller.
            sources = self._extract_sources(relevant_docs, doc_scores)

            result = {
                "status": "success",
                "question": question,
                "answer": answer,
                "sources": sources,
                "context_length": len(context),
                "documents_retrieved": len(relevant_docs),
                "rerank_stats": rerank_stats
            }

            # Record the full pipeline run for observability.
            retrieved_docs = [{"text": doc.page_content, "metadata": doc.metadata} for doc in relevant_docs]
            trace_rag_pipeline(question, retrieved_docs, answer, {
                "collection": collection_name,
                "top_k": top_k,
                "rerank_enabled": self.use_reranker,
                "context_length": len(context),
                "documents_retrieved": len(relevant_docs)
            }, user_id=user_id, session_id=session_id)

            logger.info(f"问题回答完成: {question}")
            return result

        except Exception as e:
            logger.error(f"回答问题失败: {str(e)}")
            error_result = {
                "status": "error",
                "message": f"回答问题失败: {str(e)}",
                "question": question
            }
            # user_id/session_id were resolved before the try, so tracing the
            # failure cannot itself fail on undefined names.
            trace_rag_pipeline(question, [], f"Error: {str(e)}", {"collection": collection_name, "status": "error"},
                             user_id=user_id, session_id=session_id)
            return error_result

    def _build_context(self, documents: List[Document]) -> str:
        """Concatenate documents into a numbered, source-labelled context string."""
        context_parts = []

        for i, doc in enumerate(documents, 1):
            source = doc.metadata.get("source", "未知来源")
            content = doc.page_content.strip()

            context_parts.append(f"文档 {i} (来源: {source}):\n{content}\n")

        return "\n".join(context_parts)

    def _extract_sources(self, documents: List[Document], scores: Optional[List[float]] = None) -> List[Dict[str, Any]]:
        """Build per-document source-attribution dicts.

        Args:
            documents: Documents used to answer the question.
            scores: Optional parallel list of similarity/rerank scores; when
                missing or shorter than ``documents``, ``similarity`` is None.

        Returns:
            One dict per document with source, file name/type, a content
            preview (truncated to 200 chars), and the similarity score.
        """
        sources = []

        for i, doc in enumerate(documents):
            source_info = {
                "source": doc.metadata.get("source", "未知来源"),
                "file_name": doc.metadata.get("file_name", "未知文件"),
                "file_type": doc.metadata.get("file_type", "未知类型"),
                "content_preview": doc.page_content[:200] + "..." if len(doc.page_content) > 200 else doc.page_content,
                "similarity": scores[i] if scores and i < len(scores) else None
            }
            sources.append(source_info)

        return sources

    def get_collection_info(self, collection_name: str = "default") -> Dict[str, Any]:
        """Return collection info from the document processor; error dict on failure."""
        try:
            return self.document_processor.get_collection_info(collection_name)
        except Exception as e:
            logger.error(f"获取集合信息失败: {str(e)}")
            return {
                "status": "error",
                "message": f"获取集合信息失败: {str(e)}"
            }

    def list_collections(self) -> List[str]:
        """List available collections.

        NOTE(review): currently a stub — always returns ["default"] until a
        real enumeration is implemented against the vector store.
        """
        try:
            return ["default"]
        except Exception as e:
            logger.error(f"列出集合失败: {str(e)}")
            return []

    def delete_collection(self, collection_name: str) -> Dict[str, Any]:
        """Delete a collection.

        NOTE(review): currently a stub — it only logs and reports success;
        actual deletion depends on the underlying vector-database API.
        """
        try:
            logger.info(f"删除集合: {collection_name}")

            return {
                "status": "success",
                "message": f"集合 {collection_name} 删除成功"
            }

        except Exception as e:
            logger.error(f"删除集合失败: {str(e)}")
            return {
                "status": "error",
                "message": f"删除集合失败: {str(e)}"
            }