import os
import yaml
import numpy as np
from langchain_text_splitters import RecursiveCharacterTextSplitter
from langchain_community.document_loaders import TextLoader, PyPDFLoader, Docx2txtLoader
from loguru import logger
from backend.database.db import db_manager

class RAGManager:
    """Singleton manager for the RAG pipeline.

    Responsibilities: load documents, split them into chunks, generate
    embeddings via the active LLM provider, store/retrieve chunks in the
    vector database (through ``db_manager``), and build prompt context.

    Configuration is read once from ``<repo_root>/configs/config.yaml``
    (the ``rag`` and ``database`` sections) on first instantiation.
    """

    _instance = None      # the shared singleton instance
    _initialized = False  # guards against re-running __init__ on the singleton

    def __new__(cls):
        # Classic lazy singleton.
        # NOTE(review): not thread-safe under concurrent first construction —
        # acceptable if the first instantiation happens at import time (it does,
        # via the module-level instance), but worth confirming.
        if cls._instance is None:
            cls._instance = super().__new__(cls)
        return cls._instance

    def __init__(self):
        """Load configuration and initialize RAG components (idempotent)."""
        if self._initialized:
            return

        # Resolve configs/config.yaml relative to this file: three directory
        # levels up from this module, then configs/config.yaml.
        config_path = os.path.join(
            os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))),
            'configs', 'config.yaml'
        )
        with open(config_path, 'r', encoding='utf-8') as f:
            config = yaml.safe_load(f)
        self.rag_config = config['rag']
        self.db_config = config['database']

        self.embedding_model = None  # set later via set_embedding_model()
        self.text_splitter = None    # built in _initialize_components()

        self._initialize_components()

        self._initialized = True

    def _initialize_components(self):
        """Initialize RAG components: the recursive text splitter.

        Raises:
            Exception: re-raised after logging if splitter construction fails.
        """
        try:
            self.text_splitter = RecursiveCharacterTextSplitter(
                chunk_size=self.rag_config.get('chunk_size', 800),
                chunk_overlap=self.rag_config.get('chunk_overlap', 50),
                length_function=len,
                separators=["\n\n", "\n", " ", ""]
            )
            logger.info("RAG组件初始化完成")
        except Exception as e:
            logger.error(f"RAG组件初始化失败: {str(e)}")
            raise

    def set_embedding_model(self, model):
        """Set the embedding model.

        Args:
            model: embedding model (name or instance).
        """
        # Fix: the original wrapped this infallible assignment in a
        # try/except that silently swallowed any error — removed so
        # problems are no longer hidden.
        self.embedding_model = model
        logger.info(f"成功设置嵌入模型: {model}")

    def _is_cuda_available(self):
        """Return True if torch is importable and reports CUDA available."""
        try:
            import torch
            return torch.cuda.is_available()
        except Exception:
            # Fix: was a bare `except:`, which also caught KeyboardInterrupt
            # and SystemExit.
            return False

    def load_document(self, file_path):
        """Load a document into a list of langchain Documents.

        Supported formats: .txt/.md (TextLoader), .pdf (PyPDFLoader),
        .docx (Docx2txtLoader), .xlsx/.xls/.csv (project document parser,
        wrapped into a single Document).

        Args:
            file_path: path to the document on disk.

        Returns:
            list: langchain Document objects.

        Raises:
            ValueError: for an unsupported file extension.
        """
        try:
            file_extension = os.path.splitext(file_path)[1].lower()

            if file_extension in ('.txt', '.md'):
                loader = TextLoader(file_path, encoding='utf-8')
            elif file_extension == '.pdf':
                loader = PyPDFLoader(file_path)
            elif file_extension == '.docx':
                loader = Docx2txtLoader(file_path)
            elif file_extension in ('.xlsx', '.xls', '.csv'):
                # Spreadsheet/CSV files go through the project's own parser,
                # then get wrapped as a single langchain Document.
                from backend.common.document_parser import DocumentParserFactory
                parser = DocumentParserFactory.get_parser(file_path)
                content = parser.parse(file_path)
                from langchain.schema import Document
                return [Document(page_content=content)]
            else:
                raise ValueError(f"不支持的文件格式: {file_extension}")

            documents = loader.load()
            logger.info(f"文档加载成功: {file_path}, 页数: {len(documents)}")
            return documents
        except Exception as e:
            logger.error(f"文档加载失败: {str(e)}")
            raise

    def split_document(self, documents):
        """Split loaded documents into chunks using the configured splitter.

        Args:
            documents: list of langchain Documents.

        Returns:
            list: chunked Documents.
        """
        try:
            chunks = self.text_splitter.split_documents(documents)
            logger.info(f"文档分割完成，生成块数: {len(chunks)}")
            return chunks
        except Exception as e:
            logger.error(f"文档分割失败: {str(e)}")
            raise

    def generate_embeddings(self, texts):
        """Generate embedding vectors for a list of texts.

        Delegates to the currently active LLM provider (read from config
        at call time, so provider switches take effect immediately).

        Args:
            texts: iterable of strings.

        Returns:
            np.ndarray: 2-D array, one embedding row per input text.
        """
        try:
            # Imported lazily to avoid a circular import at module load.
            from backend.common.config import config
            from backend.llm import llm_manager

            active_provider = config.llm_providers.active_provider
            llm_provider = llm_manager.get_provider(active_provider)

            embeddings = [
                np.array(llm_provider.generate_embedding(text))
                for text in texts
            ]
            return np.array(embeddings)
        except Exception as e:
            logger.error(f"嵌入向量生成失败: {str(e)}")
            raise

    def add_document_to_vector_db(self, file_path, metadata=None):
        """Load, chunk, embed, and store a document in the vector database.

        Args:
            file_path: path to the document.
            metadata: optional dict merged into each chunk's metadata.

        Returns:
            int: total number of chunks produced.
        """
        try:
            documents = self.load_document(file_path)
            chunks = self.split_document(documents)

            # Fix: use .get with a default like every other config read,
            # instead of a KeyError-prone direct index.
            batch_size = self.rag_config.get('retrieval_batch_size', 16)
            total_chunks = len(chunks)
            processed_chunks = 0

            # Embed and insert in batches to bound memory/API usage per call.
            for i in range(0, total_chunks, batch_size):
                batch = chunks[i:i + batch_size]
                texts = [chunk.page_content for chunk in batch]

                embeddings = self.generate_embeddings(texts)

                for j, (text, embedding) in enumerate(zip(texts, embeddings)):
                    chunk_metadata = {
                        'source': file_path,
                        'chunk_index': i + j,
                        'total_chunks': total_chunks,
                        'page': batch[j].metadata.get('page', 0)
                    }
                    # Caller-supplied metadata wins on key collisions.
                    if metadata:
                        chunk_metadata.update(metadata)

                    db_manager.insert_document(
                        content=text,
                        metadata=chunk_metadata,
                        vector=embedding.tolist()
                    )

                processed_chunks += len(batch)
                logger.info(f"处理进度: {processed_chunks}/{total_chunks} 块已添加到向量数据库")

            logger.info(f"文档 {file_path} 成功添加到向量数据库")
            return total_chunks
        except Exception as e:
            logger.error(f"文档添加到向量数据库失败: {str(e)}")
            raise

    def retrieve_relevant_docs(self, query, top_k=None, score_threshold=None):
        """Retrieve documents relevant to a query from the vector database.

        Args:
            query: query text.
            top_k: max results; defaults to config 'top_k' (5) when None.
            score_threshold: minimum similarity; defaults to config
                'score_threshold' when None.

        Returns:
            list[dict]: results with id/content/metadata/similarity_score;
            empty list on any failure (callers fall back to the base model).
        """
        try:
            # Fix: the original used `x = x or default`, which silently
            # replaced explicit falsy arguments (top_k=0, score_threshold=0.0)
            # with the config values. Compare against None instead.
            if top_k is None:
                top_k = self.rag_config.get('top_k', 5)
            if score_threshold is None:
                score_threshold = self.rag_config.get('score_threshold', 0.0)

            query_vector = self.generate_embeddings([query])[0]

            results = db_manager.search_documents(
                query_vector=query_vector.tolist(),
                top_k=top_k,
                score_threshold=score_threshold
            )

            formatted_results = [
                {
                    'id': result['id'],
                    'content': result['content'],
                    'metadata': result['metadata'],
                    'similarity_score': float(result['similarity'])
                }
                for result in results
            ]

            logger.info(f"检索到 {len(formatted_results)} 条相关文档")
            return formatted_results
        except Exception as e:
            logger.error(f"文档检索失败: {str(e)}")
            # On retrieval failure return an empty list so the system can
            # answer with the base model alone.
            return []

    def build_context(self, query, relevant_docs):
        """Assemble a prompt context from retrieved documents.

        Args:
            query: the user's question.
            relevant_docs: list of dicts with a 'content' key.

        Returns:
            str: the assembled context, or "" when there are no docs or on error.
        """
        try:
            if not relevant_docs:
                return ""

            context_parts = ["根据以下信息回答问题:\n"]
            for i, doc in enumerate(relevant_docs):
                context_parts.append(f"[信息 {i+1}]\n{doc['content']}\n\n")
            context_parts.append(f"问题: {query}\n")
            context_parts.append("请基于上述信息，用中文简洁明了地回答问题。如果信息不足，请直接说明无法回答。\n")

            return "".join(context_parts)
        except Exception as e:
            logger.error(f"上下文构建失败: {str(e)}")
            return ""

    def get_context_for_query(self, query):
        """Build the full RAG context for a user query.

        Pipeline: augment the query → retrieve relevant docs → build context.

        Args:
            query: the user's question.

        Returns:
            str: context text, or "" on failure (base model fallback).
        """
        try:
            augmented_query = self.augment_query(query)
            relevant_docs = self.retrieve_relevant_docs(augmented_query)
            # Note: the context is built with the ORIGINAL query, not the
            # augmented one — augmentation only affects retrieval.
            context = self.build_context(query, relevant_docs)

            logger.info(f"成功为查询构建上下文，包含 {len(relevant_docs)} 条相关文档")
            return context
        except Exception as e:
            logger.error(f"获取查询上下文失败: {str(e)}")
            return ""

    def augment_query(self, query):
        """Augment the query to improve retrieval.

        Placeholder for query expansion/rewriting; currently returns the
        query unchanged.
        """
        return query

    def get_rag_stats(self):
        """Return RAG system statistics.

        Returns:
            dict: total_documents, embedding_model, chunk_size,
            recent_documents. Safe defaults are returned in in-memory mode
            or on any database error.
        """
        default_stats = {
            'total_documents': 0,
            'embedding_model': self.db_config.get('embedding_model', ''),
            'chunk_size': self.rag_config.get('chunk_size', 0),
            'recent_documents': []
        }
        try:
            # In-memory mode has no documents table; return defaults to
            # avoid key errors.
            if getattr(db_manager, 'in_memory_mode', False):
                return default_stats

            with db_manager.get_cursor() as cursor:
                cursor.execute("SELECT COUNT(*) as total_docs FROM documents")
                row = cursor.fetchone()
                # Support both tuple cursors and dict-like (DictCursor) rows.
                if isinstance(row, dict) or hasattr(row, 'keys'):
                    total_docs = row['total_docs']
                else:
                    total_docs = row[0] if row else 0

                cursor.execute("SELECT id, content, created_at FROM documents ORDER BY created_at DESC LIMIT 5")
                recent_docs = cursor.fetchall() or []

                return {
                    'total_documents': total_docs,
                    'embedding_model': self.db_config.get('embedding_model', ''),
                    'chunk_size': self.rag_config.get('chunk_size', 0),
                    'recent_documents': recent_docs
                }
        except Exception as e:
            logger.error(f"获取RAG统计信息失败: {str(e)}")
            return default_stats

    def clear_vector_db(self):
        """Delete every row from the documents table.

        Returns:
            bool: True on success, False on failure.
        """
        try:
            with db_manager.get_cursor() as cursor:
                cursor.execute("DELETE FROM documents")
                db_manager.connection.commit()
                logger.info("向量数据库已清空")
                return True
        except Exception as e:
            # Fix: guard the rollback — a dead connection would otherwise
            # raise here and mask the original error.
            try:
                db_manager.connection.rollback()
            except Exception:
                pass
            logger.error(f"清空向量数据库失败: {str(e)}")
            return False

# Global RAG manager singleton, created eagerly at import time (this reads
# configs/config.yaml and builds the text splitter as a side effect).
rag_manager = RAGManager()