"""
RAG 完整流程管道
整合文档处理和向量存储功能
"""
import os
from typing import List, Dict, Any, Optional
from pathlib import Path

from langchain_core.documents import Document

from utils.rag_document_handler import RAGDocumentHandler
from utils.vector_store_handler import VectorStoreHandler


class RAGPipeline:
    """
    End-to-end RAG pipeline.

    Responsibilities:
    1. Load and split documents.
    2. Embed the chunks and store them in Milvus.
    3. Run similarity search over the stored vectors.
    """

    def __init__(
        self,
        document_handler: Optional[RAGDocumentHandler] = None,
        vector_store: Optional[VectorStoreHandler] = None,
        chunk_size: int = 1000,
        chunk_overlap: int = 100,
    ):
        """
        Initialize the RAG pipeline.

        :param document_handler: document handler instance; when None a
            default one is created from chunk_size/chunk_overlap
        :param vector_store: vector store handler instance; must be provided
            before any store/search call (see _require_vector_store)
        :param chunk_size: chunk size used when splitting documents
            (ignored when document_handler is given)
        :param chunk_overlap: overlap between consecutive chunks
            (ignored when document_handler is given)
        """
        if document_handler is None:
            self.document_handler = RAGDocumentHandler(
                chunk_size=chunk_size,
                chunk_overlap=chunk_overlap
            )
        else:
            self.document_handler = document_handler

        # Vector store must be supplied externally; kept optional so the
        # pipeline can be constructed before the store is ready.
        self.vector_store = vector_store

    def _require_vector_store(self) -> None:
        """Raise RuntimeError if no vector store has been configured."""
        if self.vector_store is None:
            raise RuntimeError("向量存储未初始化")

    @staticmethod
    def _parse_metadata(raw: Any) -> Dict[str, Any]:
        """
        Best-effort parse of a stored metadata string back into a dict.

        Metadata is persisted as a Python-literal string; when it cannot be
        parsed, the raw value is preserved under "raw_metadata" instead of
        being silently dropped.

        :param raw: the stored metadata value (normally a str)
        :return: parsed metadata dict, or a wrapper dict on parse failure
        """
        import ast  # local import kept to avoid a module-level dependency
        try:
            return ast.literal_eval(raw)
        except (ValueError, SyntaxError, TypeError):
            # Not a valid Python literal — keep the raw value for debugging.
            return {"raw_metadata": raw}

    def _attach_documents(self, results: List[Dict[str, Any]]) -> None:
        """
        Attach a langchain Document to each search result carrying text.

        Mutates each result dict in place, adding a 'document' key built
        from its 'text' and (parsed) 'metadata' fields.
        """
        for result in results:
            if 'text' in result:
                metadata: Dict[str, Any] = {}
                if 'metadata' in result:
                    metadata = self._parse_metadata(result['metadata'])
                result['document'] = Document(
                    page_content=result['text'],
                    metadata=metadata
                )

    def process_and_store_file(
        self,
        file_path: str,
        batch_size: int = 100
    ) -> Dict[str, Any]:
        """
        Process a single file and store it in the vector database.

        :param file_path: path of the file to process
        :param batch_size: batch size for bulk insertion
        :return: result dict with chunk count and inserted IDs
        :raises RuntimeError: if the vector store is not initialized
        """
        self._require_vector_store()

        # Load and split the document into chunks.
        split_documents = self.document_handler.load_and_split(file_path)

        # Store the chunks in the vector database.
        inserted_ids = self.vector_store.insert_documents(
            documents=split_documents,
            batch_size=batch_size
        )

        return {
            "file_path": file_path,
            "chunk_count": len(split_documents),
            "inserted_ids": inserted_ids,
            "success": True
        }

    def process_and_store_files(
        self,
        file_paths: List[str],
        batch_size: int = 100
    ) -> List[Dict[str, Any]]:
        """
        Process multiple files and store them in the vector database.

        Failures are recorded per file (success=False + error message)
        rather than aborting the whole batch.

        :param file_paths: list of file paths to process
        :param batch_size: batch size for bulk insertion
        :return: list of per-file result dicts
        """
        results = []
        for file_path in file_paths:
            try:
                results.append(self.process_and_store_file(file_path, batch_size))
            except Exception as e:
                # Deliberate best-effort boundary: one bad file must not
                # stop the rest of the batch.
                results.append({
                    "file_path": file_path,
                    "error": str(e),
                    "success": False
                })
        return results

    def process_and_store_directory(
        self,
        directory_path: str,
        file_extensions: Optional[List[str]] = None,
        batch_size: int = 100
    ) -> List[Dict[str, Any]]:
        """
        Process every matching document under a directory (recursively).

        :param directory_path: root directory to scan
        :param file_extensions: extensions to include; defaults to
            ['.pdf', '.txt', '.docx']
        :param batch_size: batch size for bulk insertion
        :return: list of per-file result dicts
        """
        if file_extensions is None:
            file_extensions = ['.pdf', '.txt', '.docx']

        # Collect all matching files recursively under the directory.
        directory = Path(directory_path)
        file_paths = [
            str(fp)
            for ext in file_extensions
            for fp in directory.glob(f"**/*{ext}")
        ]

        return self.process_and_store_files(file_paths, batch_size)

    def search(
        self,
        query: str,
        top_k: int = 5,
        return_documents: bool = True
    ) -> List[Dict[str, Any]]:
        """
        Run a similarity search for a single query.

        :param query: query text
        :param top_k: number of most similar results to return
        :param return_documents: whether to attach Document objects to results
        :return: list of search-result dicts
        :raises RuntimeError: if the vector store is not initialized
        """
        self._require_vector_store()

        results = self.vector_store.search_similar(
            query_text=query,
            top_k=top_k
        )

        if return_documents:
            self._attach_documents(results)

        return results

    def search_batch(
        self,
        queries: List[str],
        top_k: int = 5,
        return_documents: bool = True
    ) -> List[List[Dict[str, Any]]]:
        """
        Run similarity searches for multiple queries at once.

        :param queries: list of query texts
        :param top_k: number of most similar results per query
        :param return_documents: whether to attach Document objects to results
        :return: one result list per query
        :raises RuntimeError: if the vector store is not initialized
        """
        self._require_vector_store()

        all_results = self.vector_store.search_similar_batch(
            query_texts=queries,
            top_k=top_k
        )

        if return_documents:
            for results in all_results:
                self._attach_documents(results)

        return all_results

    def get_context_for_query(
        self,
        query: str,
        top_k: int = 5,
        separator: str = "\n\n---\n\n"
    ) -> str:
        """
        Build the context text for a query (for RAG generation).

        :param query: query text
        :param top_k: number of most similar results to include
        :param separator: separator placed between text chunks
        :return: concatenated context text
        :raises RuntimeError: if the vector store is not initialized
        """
        results = self.search(query, top_k, return_documents=False)

        # Join the retrieved chunk texts into one context string.
        texts = [result.get('text', '') for result in results]
        return separator.join(texts)

    def get_stats(self) -> Dict[str, Any]:
        """
        Collect pipeline statistics.

        :return: stats dict covering the document handler and vector store
        """
        stats = {
            "document_handler": {
                "chunk_size": self.document_handler.chunk_size,
                "chunk_overlap": self.document_handler.chunk_overlap,
                "encoding": self.document_handler.encoding,
            }
        }

        if self.vector_store is not None:
            try:
                stats["vector_store"] = self.vector_store.get_collection_stats()
            except Exception:
                # Stats are informational; report the failure instead of raising.
                stats["vector_store"] = {"error": "无法获取向量存储统计信息"}
        else:
            stats["vector_store"] = {"error": "向量存储未初始化"}

        return stats


def create_rag_pipeline_from_config(config: Dict[str, Any]) -> RAGPipeline:
    """
    Build a RAGPipeline from a configuration dictionary.

    :param config: configuration mapping; recognized keys are CHUNK_SIZE,
        CHUNK_OVERLAP and ENCODING, plus whatever the vector-store factory
        consumes
    :return: a fully wired RAGPipeline instance
    """
    # Document handler configured from (optional) config entries.
    handler = RAGDocumentHandler(
        chunk_size=config.get("CHUNK_SIZE", 1000),
        chunk_overlap=config.get("CHUNK_OVERLAP", 100),
        encoding=config.get("ENCODING", "utf-8"),
    )

    # Vector store built by its own config-driven factory.
    from utils.vector_store_handler import create_vector_store_from_config

    return RAGPipeline(
        document_handler=handler,
        vector_store=create_vector_store_from_config(config),
    )


