"""
RAG引擎核心模块
提供文档处理、向量存储和本地LLM的集成服务
"""

import os
import logging
import hashlib
import re
from typing import List, Dict, Any, Optional
import time
import torch

from langchain_community.document_loaders import TextLoader
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.schema import Document

from .vector_store import VectorStoreManager
from .local_llm import local_llm_manager
from .persona_manager import persona_manager

logger = logging.getLogger(__name__)


class RAGEngine:
    """Core RAG engine.

    Wires together document loading/splitting, vector storage and the local
    LLM, and answers questions with optional persona (role-play) support.
    """

    def __init__(self, config: Optional[Dict[str, Any]] = None):
        """Initialize the RAG engine.

        Args:
            config: Configuration dict; when None (or when individual keys are
                missing) the built-in defaults below are used.
        """
        logger.info("初始化RAG引擎...")

        # Effective configuration; missing keys fall back to defaults below.
        self.config = config or {}

        # Release any leftover GPU memory before loading components.
        self._clear_memory()

        # Log the effective configuration. The chunk defaults shown here match
        # the actual defaults used by the text splitter below (they previously
        # logged 1000/200 while the splitter used 600/100).
        logger.info("RAG配置参数:")
        logger.info(f"  文档目录: {self.config.get('documents_dir', './documents')}")
        logger.info(f"  向量存储目录: {self.config.get('vector_store_dir', './chroma_db')}")
        logger.info(f"  本地LLM路径: {self.config.get('local_llm_path', './models/Qwen2.5-VL-3B-Instruct')}")
        logger.info(f"  嵌入模型路径: {self.config.get('embedding_model_path', './models/bge-large-zh-v1.5')}")
        logger.info(f"  文本分割器: {self.config.get('text_splitter_type', 'recursive')}")
        logger.info(f"  块大小: {self.config.get('chunk_size', 600)}")
        logger.info(f"  重叠大小: {self.config.get('chunk_overlap', 100)}")
        logger.info(f"  混合检索: {self.config.get('use_hybrid_retrieval', True)}")
        logger.info(f"  向量权重: {self.config.get('vector_weight', 0.7)}")
        logger.info(f"  BM25权重: {self.config.get('bm25_weight', 0.3)}")
        logger.info(f"  使用重排序: {self.config.get('use_reranker', True)}")
        logger.info(f"  重排序模型: {self.config.get('reranker_model', 'paraphrase-MiniLM-L6-v2')}")

        start_time = time.time()

        # Loader registry — plain-text formats only.
        self.document_loaders = {
            '.txt': TextLoader,
            '.md': TextLoader,
        }

        # 1. Text splitter.
        logger.info("正在初始化文本分割器...")
        split_start = time.time()
        self.text_splitter = RecursiveCharacterTextSplitter(
            chunk_size=self.config.get('chunk_size', 600),        # smaller chunks improve retrieval precision
            chunk_overlap=self.config.get('chunk_overlap', 100),  # smaller overlap reduces redundancy
            # CJK sentence terminators first, then their ASCII counterparts
            # (the first semicolon was a duplicated ASCII ";" — now full-width).
            separators=["\n\n", "\n", "。", "！", "？", "；", ";", ".", "!", "?", " ", ""]
        )
        split_time = time.time() - split_start
        logger.info(f"文本分割器初始化完成 (耗时: {split_time:.2f}s)")

        # 2. Vector store manager.
        logger.info("正在初始化向量存储管理器...")
        vec_start = time.time()

        # Imported lazily so the embedding model is not loaded at module import.
        from .embedding_manager import EmbeddingManager

        embedding_manager = EmbeddingManager()

        self.vector_store_manager = VectorStoreManager(
            persist_directory=self.config.get('vector_store_dir', './chroma_db'),
            embedding_manager=embedding_manager
        )
        vec_time = time.time() - vec_start
        logger.info(f"向量存储管理器初始化完成 (耗时: {vec_time:.2f}s)")

        # 3. Plain vector retrieval is used instead of a hybrid retriever.
        logger.info("使用简化的向量检索模式")

        # 4. The reranker is deliberately skipped to save memory.
        self.reranker = None
        logger.info("为节省内存，跳过重排序器")

        # 5. The local LLM is initialized globally; keep a reference to it.
        logger.info("本地大语言模型已准备就绪 (Qwen2.5-VL-3B-Instruct)")
        self.local_llm_manager = local_llm_manager

        total_time = time.time() - start_time
        logger.info(f"RAG引擎初始化完成 (总耗时: {total_time:.2f}s)")

    def _clear_memory(self) -> None:
        """Release cached GPU memory (if any) and run the garbage collector."""
        if torch.cuda.is_available():
            torch.cuda.empty_cache()
        import gc
        gc.collect()

    def process_document(self, file_path: str, file_type: Optional[str] = None) -> Dict[str, Any]:
        """Load, split and index a single document.

        Args:
            file_path: Path to the document file.
            file_type: File extension such as '.txt'; auto-detected from the
                path when None.

        Returns:
            A result dict: on success, chunk statistics and the generated IDs;
            on failure, ``{"status": "error", "message": ..., "file_path": ...}``.
        """
        logger.info(f"📄 开始处理文档: {file_path}")
        start_time = time.time()

        try:
            # Stable short document ID derived from the path
            # (hashlib is already imported at module level).
            doc_id = hashlib.md5(file_path.encode()).hexdigest()[:8]

            # Auto-detect the file type from the extension.
            if not file_type:
                _, ext = os.path.splitext(file_path)
                file_type = ext.lower()

            # Reject unsupported formats early.
            loader_class = self.document_loaders.get(file_type)
            if not loader_class:
                return {
                    "status": "error",
                    "message": f"不支持的文件类型: {file_type}",
                    "file_path": file_path
                }

            # Load the document; utf-8 handles Chinese text files.
            logger.info(f"📖 正在加载文档...")
            loader = loader_class(file_path, encoding='utf-8')
            documents = loader.load()

            # Split into chunks.
            logger.info(f"✂️ 正在智能分割文档...")
            texts = self.text_splitter.split_documents(documents)

            # Build the payload for the vector store.
            logger.info(f"📝 正在准备文档数据...")
            doc_data = []
            for i, text in enumerate(texts):
                doc_data.append({
                    'page_content': text.page_content,
                    'metadata': {
                        **text.metadata,
                        'source': file_path,
                        'document_id': doc_id,
                        'chunk_id': i,
                        'file_extension': file_type,
                        'chunk_length': len(text.page_content)
                    }
                })

            logger.info(f"💾 正在添加到向量存储...")
            doc_ids = self.vector_store_manager.add_documents(doc_data)

            # Retriever re-initialization is intentionally skipped.
            logger.info("文档添加完成，使用简化检索模式")

            process_time = time.time() - start_time
            logger.info(f"✅ 文档处理完成！耗时: {process_time:.2f}s")

            return {
                "status": "success",
                "file_path": file_path,
                "document_id": doc_id,
                "filename": os.path.basename(file_path),
                "chunks_processed": len(texts),
                "doc_ids": doc_ids,
                "process_time": process_time,
                # NOTE(review): the metadata built above never sets 'keywords',
                # 'summary', 'section_titles' or 'reading_time_minutes', so
                # these counts are currently always 0 — kept for interface
                # stability with existing callers.
                "enhanced_metadata": {
                    "keywords_extracted": sum(1 for chunk in doc_data if chunk['metadata'].get('keywords')),
                    "summaries_generated": sum(1 for chunk in doc_data if chunk['metadata'].get('summary')),
                    "section_titles_found": sum(len(chunk['metadata'].get('section_titles', [])) for chunk in doc_data),
                    "reading_time_total": sum(chunk['metadata'].get('reading_time_minutes', 0) for chunk in doc_data)
                }
            }

        except Exception as e:
            logger.error(f"❌ 文档处理失败: {str(e)}")
            return {
                "status": "error",
                "message": str(e),
                "file_path": file_path
            }

    def query(self, question: str, k: int = 4, use_persona: bool = True) -> Dict[str, Any]:
        """Answer a question via retrieval-augmented generation.

        Args:
            question: The user's question.
            k: Number of documents to retrieve.
            use_persona: Whether to apply the persona (role-play) feature.

        Returns:
            A result dict with the answer, sources, timing and similarity
            information, or an error dict on failure.
        """
        logger.info(f"🔍 开始查询: {question}")
        start_time = time.time()

        # Pre-set so the except handler can never hit an unbound local
        # if an exception is raised before persona resolution completes.
        persona_name = "智能助手"

        try:
            # Resolve the active persona (if enabled).
            current_persona = persona_manager.get_current_persona() if use_persona else None
            persona_name = current_persona['name'] if current_persona else "智能助手"

            logger.info(f"🎭 当前角色: {persona_name}")

            # Simple vector retrieval (no hybrid retriever / reranker).
            logger.info(f"📚 正在检索相关文档...")
            search_results = self.vector_store_manager.similarity_search_with_score(question, k=k)

            if not search_results:
                logger.warning("⚠️ 未找到相关文档，使用角色回退模式")
                return self._generate_persona_fallback_response(question, use_persona, start_time)

            # Best similarity across the retrieved documents.
            max_similarity = max(score for _, score in search_results)
            logger.info(f"📊 最大相似度: {max_similarity:.3f}")

            # Fall back to pure persona mode when similarity is too low.
            if use_persona and persona_manager.should_use_persona_fallback(max_similarity):
                logger.info(f"🎭 相似度低于阈值({persona_manager.similarity_threshold})，使用角色回退模式")
                return self._generate_persona_fallback_response(question, use_persona, start_time)

            sources = []
            context_parts = []

            # Sort by similarity so the most relevant content comes first.
            sorted_results = sorted(search_results, key=lambda x: x[1], reverse=True)

            for doc, score in sorted_results:
                # Use raw content in the context (no "document N:" prefix).
                context_parts.append(doc.page_content.strip())
                sources.append({
                    "content": doc.page_content[:200] + "..." if len(doc.page_content) > 200 else doc.page_content,
                    "source": doc.metadata.get("source", "未知"),
                    "metadata": doc.metadata,
                    "similarity": float(score)
                })

            # Clear separator between retrieved chunks.
            context = "\n\n---\n\n".join(context_parts)

            # Build the prompt — persona-flavored when a persona is active.
            if use_persona and current_persona:
                prompt = persona_manager.generate_persona_prompt(question, context)
                logger.info(f"🎭 使用{persona_name}角色提示词")
            else:
                prompt = f"""基于以下文档内容回答问题：

{context}

问题：{question}

请基于提供的文档内容给出准确、简洁的回答。如果文档中没有相关信息，请明确说明。"""

            logger.info("🤖 正在生成回答...")
            answer = self.local_llm_manager.generate(prompt)

            query_time = time.time() - start_time
            logger.info(f"✅ 查询完成！耗时: {query_time:.2f}s")

            return {
                "status": "success",
                "question": question,
                "answer": answer,
                "sources": sources,
                "query_time": query_time,
                "relevant_docs_count": len(search_results),
                "max_similarity": max_similarity,
                "persona": persona_name if use_persona else None,
                "mode": "document_based"
            }

        except Exception as e:
            logger.error(f"❌ 查询失败: {str(e)}")
            return {
                "status": "error",
                "message": str(e),
                "question": question,
                "answer": "抱歉，处理查询时出现了错误。",
                "persona": persona_name if use_persona else None
            }

    def _generate_persona_fallback_response(self, question: str, use_persona: bool, start_time: float) -> Dict[str, Any]:
        """Generate a persona-only fallback answer (no retrieved context).

        Args:
            question: The user's question.
            use_persona: Whether to apply the persona feature.
            start_time: Query start time (``time.time()``), for timing.

        Returns:
            A result dict with mode "persona_fallback", or an error dict.
        """
        # Pre-set so the except handler can never hit an unbound local.
        persona_name = "智能助手"

        try:
            current_persona = persona_manager.get_current_persona() if use_persona else None
            persona_name = current_persona['name'] if current_persona else "智能助手"

            if use_persona and current_persona:
                # Persona fallback prompt (no document context supplied).
                prompt = persona_manager.generate_persona_prompt(question)
                logger.info(f"🎭 使用{persona_name}角色回退模式")
            else:
                # Plain, persona-free prompt.
                prompt = f"请回答以下问题：{question}"

            answer = self.local_llm_manager.generate(prompt)

            query_time = time.time() - start_time
            logger.info(f"✅ 角色回退回答完成！耗时: {query_time:.2f}s")

            return {
                "status": "success",
                "question": question,
                "answer": answer,
                "sources": [],
                "query_time": query_time,
                "relevant_docs_count": 0,
                "max_similarity": 0.0,
                "persona": persona_name if use_persona else None,
                "mode": "persona_fallback"
            }

        except Exception as e:
            logger.error(f"❌ 角色回退失败: {str(e)}")
            return {
                "status": "error",
                "message": str(e),
                "question": question,
                "answer": "抱歉，处理查询时出现了错误。",
                "persona": persona_name if use_persona else None
            }