import os
from llama_index.core import VectorStoreIndex, SimpleDirectoryReader, StorageContext, load_index_from_storage
from llama_index.llms.huggingface import HuggingFaceLLM
from llama_index.embeddings.huggingface import HuggingFaceEmbedding
from llama_index.core.prompts.prompts import SimpleInputPrompt
from loguru import logger
from typing import Dict, Optional, Any, List

from config import settings
from models.model_manager import ModelManager

class RAGManager:
    """RAG manager: builds and queries a local knowledge base.

    Responsibilities:
      * build / persist / load vector indices over documents stored in
        ``settings.RAG_DATA_DIR``;
      * answer queries with retrieval-augmented generation (RAG) or by
        calling the LLM directly.

    Implemented as a process-wide singleton; loaded indices are cached in
    the class-level ``_indices`` mapping so repeated queries reuse them.
    NOTE(review): the singleton is not thread-safe — confirm single-threaded
    construction before using from multiple threads.
    """

    _instance = None                    # singleton instance
    _indices: Dict[str, Any] = {}       # index_name -> loaded VectorStoreIndex

    def __new__(cls):
        """Singleton constructor: every call returns the same instance."""
        if cls._instance is None:
            cls._instance = super().__new__(cls)
        return cls._instance

    def __init__(self):
        """Initialize the manager; guarded so it runs only once."""
        if hasattr(self, "initialized"):
            return
        self.initialized = True

        # Ensure the raw-document and persisted-index directories exist.
        os.makedirs(settings.RAG_DATA_DIR, exist_ok=True)
        os.makedirs(settings.RAG_INDEX_DIR, exist_ok=True)

        # Embedding model used for both indexing and retrieval
        # (Chinese-optimized BGE model).
        self.embed_model = HuggingFaceEmbedding(
            model_name="BAAI/bge-large-zh-v1.5",
            cache_folder=settings.HF_CACHE_DIR
        )

        # Best effort: load the default index at startup so the first
        # query does not pay the loading cost; a missing index is not fatal.
        try:
            self.load_default_index()
        except Exception as e:
            logger.warning(f"默认索引加载失败: {str(e)}")
            logger.info("请使用build_index方法构建索引")

    def _create_llm(self, model_name: Optional[str] = None) -> HuggingFaceLLM:
        """Create a LlamaIndex-compatible LLM wrapper.

        Args:
            model_name: Model to wrap; defaults to
                ``settings.DEFAULT_MODEL_NAME``.

        Returns:
            A ``HuggingFaceLLM`` instance backed by the model/tokenizer
            pair obtained from the shared ``ModelManager``.
        """
        model_manager = ModelManager()

        if model_name is None:
            model_name = settings.DEFAULT_MODEL_NAME

        # Lazily load the model if the manager does not hold it yet.
        if model_name not in model_manager._models:
            model_manager.load_model(model_name=model_name)

        model_info = model_manager._models[model_name]
        model = model_info["model"]
        tokenizer = model_info["tokenizer"]

        # System prompt instructing the model to stay grounded in the
        # retrieved context.
        system_prompt = """You are a helpful assistant. You will answer the user's questions based on the provided context. If you don't know the answer, say you don't know."""

        # The user query is passed through verbatim.
        query_wrapper_prompt = SimpleInputPrompt("{query_str}")

        return HuggingFaceLLM(
            model=model,
            tokenizer=tokenizer,
            context_window=settings.MAX_LENGTH,
            max_new_tokens=2048,
            temperature=settings.TEMPERATURE,
            system_prompt=system_prompt,
            query_wrapper_prompt=query_wrapper_prompt,
            tokenizer_kwargs={"trust_remote_code": True},
        )

    def load_default_index(self) -> Dict[str, Any]:
        """Load the index named "default"."""
        return self.load_index("default")

    def load_index(self, index_name: str) -> Dict[str, Any]:
        """Load a persisted index into the in-memory cache.

        Args:
            index_name: Name of the index (sub-directory of
                ``settings.RAG_INDEX_DIR``).

        Returns:
            A status dict; ``status`` is ``"loaded"`` when the index was
            already cached and ``"success"`` after a fresh load.

        Raises:
            FileNotFoundError: If no persisted index exists under that name.
        """
        if index_name in self._indices:
            logger.info(f"索引 {index_name} 已经加载，直接返回")
            return {
                "index_name": index_name,
                "status": "loaded"
            }

        index_path = os.path.join(settings.RAG_INDEX_DIR, index_name)

        # BUGFIX: llama_index has used different persisted file names across
        # versions ("vector_store.json" vs "default__vector_store.json");
        # the old hard-coded check rejected valid indices. Accept any
        # "*vector_store.json" file as evidence of a persisted index.
        has_store = os.path.isdir(index_path) and any(
            name.endswith("vector_store.json") for name in os.listdir(index_path)
        )
        if not has_store:
            raise FileNotFoundError(f"索引 {index_name} 不存在，请先构建索引")

        logger.info(f"开始加载索引: {index_name}")

        try:
            # Rehydrate the index from its persist directory using the same
            # embedding model it was built with.
            storage_context = StorageContext.from_defaults(persist_dir=index_path)
            index = load_index_from_storage(
                storage_context=storage_context,
                embed_model=self.embed_model
            )

            # Cache so subsequent queries reuse the loaded index.
            self._indices[index_name] = index

            logger.info(f"索引 {index_name} 加载成功")

            return {
                "index_name": index_name,
                "status": "success",
                "index_path": index_path
            }

        except Exception as e:
            logger.error(f"索引 {index_name} 加载失败: {str(e)}")
            raise

    def build_index(
        self,
        index_name: str = "default",
        data_dir: Optional[str] = None,
        chunk_size: Optional[int] = None,
        chunk_overlap: Optional[int] = None
    ) -> Dict[str, Any]:
        """Build (or rebuild) a knowledge-base index from a directory.

        Args:
            index_name: Name under which the index is persisted.
            data_dir: Source directory; defaults to ``settings.RAG_DATA_DIR``.
            chunk_size: Text-chunk size; defaults to ``settings.CHUNK_SIZE``.
            chunk_overlap: Overlap between adjacent chunks; defaults to
                ``settings.CHUNK_OVERLAP``.

        Returns:
            A result dict with the index location and build parameters.

        Raises:
            FileNotFoundError: If ``data_dir`` does not exist.
            ValueError: If ``data_dir`` is empty.
        """
        # Local import: SentenceSplitter ships with llama_index.core, which
        # this module already depends on.
        from llama_index.core.node_parser import SentenceSplitter

        logger.info(f"开始构建索引: {index_name}")

        # Fill in defaults from settings.
        if data_dir is None:
            data_dir = settings.RAG_DATA_DIR
        if chunk_size is None:
            chunk_size = settings.CHUNK_SIZE
        if chunk_overlap is None:
            chunk_overlap = settings.CHUNK_OVERLAP

        # Validate the source directory before doing any work.
        if not os.path.exists(data_dir):
            raise FileNotFoundError(f"数据目录不存在: {data_dir}")
        if not os.listdir(data_dir):
            raise ValueError(f"数据目录为空: {data_dir}")

        try:
            documents = SimpleDirectoryReader(
                input_dir=data_dir,
                recursive=True,
                required_exts=[".txt", ".pdf", ".docx", ".md"]
            ).load_data()

            logger.info(f"成功加载 {len(documents)} 个文档")

            # BUGFIX: chunk_size/chunk_overlap are not honored as raw
            # keyword arguments of from_documents in llama_index >= 0.10,
            # so the requested chunking never took effect. Configure it
            # explicitly via a SentenceSplitter transformation.
            index = VectorStoreIndex.from_documents(
                documents=documents,
                embed_model=self.embed_model,
                transformations=[
                    SentenceSplitter(
                        chunk_size=chunk_size,
                        chunk_overlap=chunk_overlap
                    )
                ]
            )

            # Persist to disk so the index survives restarts.
            index_path = os.path.join(settings.RAG_INDEX_DIR, index_name)
            os.makedirs(index_path, exist_ok=True)
            index.storage_context.persist(persist_dir=index_path)

            # Cache in memory so queries can use it immediately.
            self._indices[index_name] = index

            logger.info(f"索引 {index_name} 构建完成，保存在: {index_path}")

            return {
                "status": "success",
                "index_name": index_name,
                "index_path": index_path,
                "num_documents": len(documents),
                "chunk_size": chunk_size,
                "chunk_overlap": chunk_overlap
            }

        except Exception as e:
            logger.error(f"索引 {index_name} 构建失败: {str(e)}")
            raise

    def add_document(
        self,
        file_path: str,
        index_name: str = "default"
    ) -> Dict[str, Any]:
        """Copy one document into the data directory and rebuild the index.

        Note: the whole index is rebuilt from ``settings.RAG_DATA_DIR``,
        which is correct but costs O(corpus) per added document.

        Args:
            file_path: Path of the document to add.
            index_name: Index to rebuild after adding.

        Returns:
            The ``build_index`` result extended with the original and
            destination file paths.

        Raises:
            FileNotFoundError: If ``file_path`` does not exist.
        """
        logger.info(f"开始添加文档: {file_path} 到索引: {index_name}")

        if not os.path.exists(file_path):
            raise FileNotFoundError(f"文件不存在: {file_path}")

        try:
            import shutil
            import time

            file_name = os.path.basename(file_path)
            dest_path = os.path.join(settings.RAG_DATA_DIR, file_name)

            # Avoid clobbering an existing file with the same name by
            # appending a timestamp suffix.
            if os.path.exists(dest_path):
                base_name, ext = os.path.splitext(file_name)
                timestamp = time.strftime("%Y%m%d_%H%M%S")
                dest_path = os.path.join(
                    settings.RAG_DATA_DIR, f"{base_name}_{timestamp}{ext}"
                )

            shutil.copy2(file_path, dest_path)

            # Rebuild the index so the new document becomes searchable.
            result = self.build_index(
                index_name=index_name,
                chunk_size=settings.CHUNK_SIZE,
                chunk_overlap=settings.CHUNK_OVERLAP
            )

            logger.info(f"文档添加成功: {file_path} -> {dest_path}")

            result["original_file_path"] = file_path
            result["dest_file_path"] = dest_path

            return result

        except Exception as e:
            logger.error(f"文档添加失败: {str(e)}")
            raise

    def query(
        self,
        query: str,
        model_name: Optional[str] = None,
        index_name: str = "default",
        top_k: int = 5,
        use_rag: bool = True
    ) -> Dict[str, Any]:
        """Answer a query, optionally augmented with retrieved context.

        Args:
            query: The question text.
            model_name: Model to use; defaults to
                ``settings.DEFAULT_MODEL_NAME``.
            index_name: Index to retrieve from (RAG mode only).
            top_k: Number of chunks to retrieve (RAG mode only).
            use_rag: If False, generate directly without retrieval.

        Returns:
            Dict with the answer, the retrieved source snippets (empty in
            direct mode), and the parameters used.
        """
        logger.info(f"开始RAG查询: {query}")

        try:
            if not use_rag:
                # Plain generation: hand the prompt straight to the model.
                model_manager = ModelManager()
                result = model_manager.generate_text(
                    prompt=query,
                    model_name=model_name
                )

                logger.info("直接生成完成")

                return {
                    "answer": result["text"],
                    "sources": [],
                    "model_name": model_name or settings.DEFAULT_MODEL_NAME,
                    "use_rag": False
                }

            # Lazily load the index on first use.
            if index_name not in self._indices:
                self.load_index(index_name)
            index = self._indices[index_name]

            llm = self._create_llm(model_name)

            query_engine = index.as_query_engine(
                llm=llm,
                similarity_top_k=top_k,
                verbose=True
            )

            response = query_engine.query(query)

            # Summarize the retrieved evidence for the caller, truncating
            # long chunks to a 200-character preview.
            sources = []
            for node in response.source_nodes:
                text = node.node.text
                sources.append({
                    # BUGFIX: node.score can be None (some retrievers do
                    # not assign similarity scores); float(None) raised
                    # TypeError here.
                    "score": float(node.score) if node.score is not None else 0.0,
                    "text": text[:200] + "..." if len(text) > 200 else text,
                    "metadata": node.node.metadata
                })

            logger.info(f"RAG查询完成，检索到 {len(sources)} 个相关文档")

            return {
                "answer": str(response),
                "sources": sources,
                "model_name": model_name or settings.DEFAULT_MODEL_NAME,
                "index_name": index_name,
                "use_rag": True
            }

        except Exception as e:
            logger.error(f"查询失败: {str(e)}")
            raise

    def list_documents(self) -> List[Dict[str, Any]]:
        """List every file currently in the knowledge-base data directory.

        Returns:
            One dict per file with its name, path relative to the data
            directory, absolute path, size in bytes, and last-modified
            time (epoch seconds).
        """
        documents = []

        # Walk the data directory recursively, collecting file metadata.
        for root, _, files in os.walk(settings.RAG_DATA_DIR):
            for file in files:
                file_path = os.path.join(root, file)
                stat_info = os.stat(file_path)

                documents.append({
                    "name": file,
                    "path": os.path.relpath(file_path, settings.RAG_DATA_DIR),
                    "full_path": file_path,
                    "size": stat_info.st_size,
                    "modified_time": stat_info.st_mtime
                })

        logger.info(f"找到 {len(documents)} 个文档")

        return documents