
"""
RAG系统实现
使用LangChain、Milvus、Elasticsearch和通义千问构建检索增强生成系统
"""

import os
import sys
import re
import numpy as np
from typing import List, Dict, Any, Optional
from pathlib import Path

# When run as a script, put the project root on sys.path so that
# package-style imports below (e.g. xiaohongshu_rag.*) can resolve.
if __name__ == "__main__":
    project_root = Path(__file__).parent.parent
    sys.path.insert(0, str(project_root))

# LangChain imports
# PyPDFLoader is used to load PDF documents
from langchain_community.document_loaders import PyPDFLoader
# Text splitter: breaks documents into appropriately sized chunks
# (the actual import happens in the try/except below)

# Import the recipe parser
from parse_recipe import parse_recipes_from_pdf_text
try:
    from langchain_text_splitters import RecursiveCharacterTextSplitter
except ImportError:
    try:
        from langchain.text_splitter import RecursiveCharacterTextSplitter
    except ImportError:
        raise ImportError("请安装 langchain-text-splitters: pip install langchain-text-splitters")

# DashScope imports
# DashScope is Alibaba Cloud's large-model service interface
try:
    import dashscope
    from dashscope import Generation, TextEmbedding
    from langchain_community.embeddings import DashScopeEmbeddings
    DashScopeEmbeddings = DashScopeEmbeddings
except ImportError:
    try:
        import dashscope
        from dashscope import Generation, TextEmbedding
        DashScopeEmbeddings = None
    except ImportError:
        raise ImportError("请安装 dashscope: pip install dashscope langchain-community")

# Recipe parsing helper. NOTE(review): parse_recipe is imported twice under
# two different module paths (see the import near the top of the file), and
# parse_recipes_from_text appears unused here — consider consolidating.
from xiaohongshu_rag.parse_recipe import parse_recipes_from_text

# Milvus vector database imports
from pymilvus import connections, utility, FieldSchema, CollectionSchema, DataType, Collection

# Elasticsearch search engine imports
from elasticsearch import Elasticsearch

# Configuration import: fall back to env-driven defaults when the project's
# config module is absent (e.g. when this file is used standalone).
try:
    from config import settings
except ImportError:
    class Settings:
        # Minimal stand-in mirroring exactly the attributes read by
        # RAGSystem._load_config.
        MILVUS_HOST = os.getenv("MILVUS_HOST", "localhost")
        MILVUS_PORT = int(os.getenv("MILVUS_PORT", "19530"))
        MILVUS_COLLECTION_NAME = os.getenv("MILVUS_COLLECTION_NAME", "rag_documents")
        ES_HOST = os.getenv("ES_HOST", "localhost")
        ES_PORT = int(os.getenv("ES_PORT", "9200"))
        ES_INDEX_NAME = os.getenv("ES_INDEX_NAME", "rag_documents")
    settings = Settings()


class RAGSystem:
    """RAG系统主类"""

    # 常量定义
    EMBEDDING_DIM = 1024  # 向量维度
    BATCH_SIZE = 10       # 批处理大小
    MIN_TEXT_LENGTH = 10  # 最小文本长度

    def __init__(self, api_key: str = "sk-2c036d80d14a443f8769e072bb09a2e1"):
        """Initialize the RAG system.

        Args:
            api_key: DashScope API key.
                SECURITY NOTE(review): a live-looking key is hard-coded as the
                default; it should be rotated and loaded from the environment
                rather than kept in source control.
        """
        dashscope.api_key = api_key
        self.api_key = api_key
        self.llm_model = "qwen3-max"  # LLM used for answer generation

        # Initialize the embedding model FIRST: the semantic splitter below
        # needs it. (Previously self.embeddings was referenced before it was
        # assigned, raising AttributeError whenever SemanticChunker imported.)
        self.embeddings = DashScopeEmbeddings(
            model="text-embedding-v3",
            dashscope_api_key=api_key
        ) if DashScopeEmbeddings else None

        # Text splitter: prefer the semantic chunker when both it and an
        # embedding model are available; otherwise fall back to character
        # recursive splitting.
        self.text_splitter = None
        if self.embeddings is not None:
            try:
                from langchain_text_splitters import SemanticChunker
                self.text_splitter = SemanticChunker(
                    self.embeddings,
                    breakpoint_threshold_type="percentile",
                    breakpoint_threshold=95
                )
                print("[初始化] 使用语义分割器")
            except ImportError:
                self.text_splitter = None
        if self.text_splitter is None:
            self.text_splitter = RecursiveCharacterTextSplitter(
                chunk_size=350,      # max characters per chunk
                chunk_overlap=50,    # overlap between consecutive chunks
                length_function=len  # how chunk length is measured
            )

        # Load configuration, then initialize backend connections.
        self._load_config()
        self._init_milvus()
        self._init_elasticsearch()

    def _load_config(self):
        """Load runtime configuration.

        Each value is resolved with the precedence: environment variable
        first, then the module-level ``settings`` object, then a hard-coded
        default.
        """
        def _cfg(key, default):
            # Env var wins over settings; settings wins over the default.
            return os.getenv(key) or getattr(settings, key, default)

        self.milvus_collection_name = _cfg("MILVUS_COLLECTION_NAME", "rag_documents")
        self.milvus_host = _cfg("MILVUS_HOST", "localhost")
        self.milvus_port = int(_cfg("MILVUS_PORT", "19530"))
        self.es_index_name = _cfg("ES_INDEX_NAME", "rag_documents")
        self.es_host = _cfg("ES_HOST", "localhost")
        self.es_port = int(_cfg("ES_PORT", "9200"))

    def _get_milvus_schema(self):
        """Build the Milvus field definitions for the document collection.

        Returns:
            list: FieldSchema objects — primary key, raw text, embedding
            vector, source path and chunk index.
        """
        fields = []
        # Auto-generated integer primary key.
        fields.append(FieldSchema(name="id", dtype=DataType.INT64, is_primary=True, auto_id=True))
        # Raw chunk text (truncated to 10000 chars on insert).
        fields.append(FieldSchema(name="text", dtype=DataType.VARCHAR, max_length=10000))
        # Dense embedding vector.
        fields.append(FieldSchema(name="embedding", dtype=DataType.FLOAT_VECTOR, dim=self.EMBEDDING_DIM))
        # Originating document path.
        fields.append(FieldSchema(name="source", dtype=DataType.VARCHAR, max_length=500))
        # Position of the chunk within its document.
        fields.append(FieldSchema(name="chunk_index", dtype=DataType.INT64))
        return fields

    def _create_milvus_collection(self):
        """Create the Milvus collection and its vector index.

        Returns:
            Collection: the newly created collection object.
        """
        fields = self._get_milvus_schema()
        schema = CollectionSchema(fields=fields, description="RAG文档向量集合")
        collection = Collection(name=self.milvus_collection_name, schema=schema)
        # HNSW index over the embedding field using L2 distance.
        index_params = {
            "metric_type": "L2",
            "index_type": "HNSW",
            "params": {"M": 16, "efConstruction": 200},
        }
        collection.create_index(field_name="embedding", index_params=index_params)
        return collection

    def _check_milvus_schema(self, collection):
        """Check whether an existing Milvus collection matches the expected schema.

        Verifies that all expected field names are present and that the
        ``embedding`` field is a FLOAT_VECTOR of dimension EMBEDDING_DIM.

        Args:
            collection: Milvus Collection object
        Returns:
            bool: True if the schema matches
        """
        expected_fields = {"id", "text", "embedding", "source", "chunk_index"}
        existing_fields = {f.name for f in collection.schema.fields}
        if not expected_fields.issubset(existing_fields):
            return False

        # Locate the embedding field and verify its dimension. Depending on
        # the pymilvus version, dim may live directly on the field, in
        # `params`, or in `type_params` — and in the latter cases it can be
        # stored as a *string*, so compare via int() rather than `==`.
        for field in collection.schema.fields:
            if field.name == "embedding" and field.dtype == DataType.FLOAT_VECTOR:
                dim = getattr(field, 'dim', None) or \
                      (field.params.get('dim') if hasattr(field, 'params') else None) or \
                      (field.type_params.get('dim') if hasattr(field, 'type_params') else None)
                try:
                    return dim is not None and int(dim) == self.EMBEDDING_DIM
                except (TypeError, ValueError):
                    return False
        return False

    def _init_milvus(self):
        """Connect to Milvus and ensure the document collection exists.

        If a collection with the configured name exists but its schema does
        not match (see _check_milvus_schema), it is dropped and recreated.
        On any failure, self.milvus_collection is set to None and vector
        storage/search are disabled.
        """
        try:
            connections.connect(alias="default", host=self.milvus_host, port=self.milvus_port)
            print(f"[Milvus] 已连接到 {self.milvus_host}:{self.milvus_port}")

            if utility.has_collection(self.milvus_collection_name):
                collection = Collection(self.milvus_collection_name)
                if not self._check_milvus_schema(collection):
                    # Schema drift: rebuild from scratch (existing data is lost).
                    print(f"[Milvus] Schema不匹配，重建集合")
                    collection.release()
                    utility.drop_collection(self.milvus_collection_name)
                    self.milvus_collection = self._create_milvus_collection()
                    print(f"[Milvus] 集合重建成功")
                else:
                    self.milvus_collection = collection
                    print(f"[Milvus] 集合已存在，已加载")
            else:
                self.milvus_collection = self._create_milvus_collection()
                print(f"[Milvus] 集合创建成功")

            # Load the collection into memory so searches can run.
            self.milvus_collection.load()
            print("[Milvus] 集合已加载到内存")
        except Exception as e:
            # Milvus is treated as optional: fail soft and disable vector search.
            print(f"[Milvus] 初始化失败: {e}")
            self.milvus_collection = None

    def _init_elasticsearch(self):
        """Connect to Elasticsearch and ensure the text index exists.

        Elasticsearch is optional: on any failure self.es_client is set to
        None and the system degrades to semantic (Milvus) retrieval only.
        """
        try:
            es_url = f"http://{self.es_host}:{self.es_port}"
            self.es_client = Elasticsearch(
                [es_url],
                request_timeout=10,
                max_retries=3,
                retry_on_timeout=True,
                verify_certs=False,  # NOTE(review): TLS verification disabled — dev-only setting
                ssl_show_warn=False
            )

            # Connectivity check. If ping() returns False, info() is expected
            # to raise, which the outer handler turns into "ES disabled".
            if not (self.es_client.ping() or self.es_client.info()):
                raise Exception("无法连接到Elasticsearch")

            version = self.es_client.info().get('version', {}).get('number', 'unknown')
            print(f"[ES] 已连接到 {self.es_host}:{self.es_port} (版本: {version})")

            # Create the index with an explicit mapping if it does not exist.
            if not self.es_client.indices.exists(index=self.es_index_name):
                mapping = {
                    "mappings": {
                        "properties": {
                            "text": {"type": "text"},
                            "source": {"type": "keyword"},
                            "chunk_index": {"type": "integer"},
                        }
                    }
                }
                self.es_client.indices.create(index=self.es_index_name, **mapping)
                print(f"[ES] 索引 '{self.es_index_name}' 创建成功")
            else:
                print(f"[ES] 索引 '{self.es_index_name}' 已存在")
        except Exception as e:
            print(f"[ES] 连接失败: {e}")
            print("[ES] 提示: Elasticsearch是可选的，系统将仅使用语义检索（Milvus）")
            self.es_client = None

    def _embed_texts(self, texts: List[str]) -> List[List[float]]:
        """Embed a list of texts.

        Uses the LangChain DashScope embeddings wrapper when available;
        otherwise falls back to the raw DashScope API one text at a time,
        substituting a zero vector for any text that fails.

        Args:
            texts: texts to embed
        Returns:
            List[List[float]]: one embedding vector per input text
        """
        if self.embeddings:
            return self.embeddings.embed_documents(texts)

        # Raw-API fallback path.
        vectors = []
        for text in texts:
            vec = None
            try:
                resp = TextEmbedding.call(model="text-embedding-v3", input=text)
                if resp.status_code == 200 and resp.output:
                    vec = resp.output['embeddings'][0]['embedding']
            except Exception as e:
                print(f"[向量化] 错误: {e}")
            # Zero vector as a placeholder when the call failed.
            vectors.append(vec if vec is not None else [0.0] * self.EMBEDDING_DIM)
        return vectors

    def _embed_query(self, query: str) -> List[float]:
        """Embed a single query string.

        Args:
            query: query text
        Returns:
            List[float]: the query embedding vector
        Raises:
            Exception: when the raw DashScope call does not succeed.
        """
        if self.embeddings:
            return self.embeddings.embed_query(query)

        # Raw-API fallback path: unlike _embed_texts, failures raise.
        resp = TextEmbedding.call(model="text-embedding-v3", input=query)
        if resp.status_code != 200 or not resp.output:
            raise Exception(f"向量化失败: {resp.message}")
        return resp.output['embeddings'][0]['embedding']

    def load_documents(self, pdf_paths: List[str]) -> List[Dict[str, Any]]:
        """Load PDF documents.

        Recipe PDFs (filename containing "菜谱" or "recipe") are parsed with
        the dedicated recipe parser; everything else goes through
        PyPDFLoader. All entries are returned as objects exposing
        ``page_content`` and ``metadata`` attributes so that
        ``split_documents`` can treat them uniformly. (The recipe branch
        previously returned bare dicts, which crashed downstream attribute
        access.)

        Args:
            pdf_paths: PDF paths, absolute or relative to this file's directory
        Returns:
            list of Document-like objects (missing files are skipped)
        """
        from types import SimpleNamespace

        all_documents = []
        for pdf_path in pdf_paths:
            # Resolve relative paths against this file's directory.
            if not os.path.isabs(pdf_path):
                pdf_path = os.path.join(os.path.dirname(__file__), pdf_path)

            if not os.path.exists(pdf_path):
                print(f"[警告] 文件不存在: {pdf_path}")
                continue

            try:
                if "菜谱" in pdf_path or "recipe" in pdf_path.lower():
                    # Recipe file: use the dedicated parser and wrap each
                    # recipe in a Document-compatible object.
                    recipes = parse_recipes_from_pdf_text(pdf_path)
                    for recipe in recipes:
                        all_documents.append(SimpleNamespace(
                            page_content=f"菜品名称：{recipe.name}\n材料：{recipe.materials}\n做法：{recipe.method}",
                            metadata={
                                "source": pdf_path,
                                "page": 0,  # the recipe parser does not track pages
                                "chunk_index": 0
                            }
                        ))
                else:
                    # Generic PDF: one Document per page via PyPDFLoader.
                    loader = PyPDFLoader(pdf_path)
                    documents = loader.load()
                    for doc in documents:
                        doc.metadata["source"] = pdf_path
                    all_documents.extend(documents)
                    print(f"[加载] 成功加载 {len(documents)} 页: {pdf_path}")
            except Exception as e:
                print(f"[加载] 加载失败 {pdf_path}: {e}")
        return all_documents

    def preprocess_text(self, text: str) -> str:
        """Clean raw text before splitting and embedding.

        Collapses whitespace runs into single spaces, then drops every
        character that is not CJK, ASCII alphanumeric, whitespace, or one of
        the allowed (mostly full-width) punctuation marks.

        Args:
            text: raw input text
        Returns:
            str: cleaned text with surrounding whitespace stripped
        """
        collapsed = re.sub(r'\s+', ' ', text)
        cleaned = re.sub(r'[^\u4e00-\u9fa5a-zA-Z0-9\s，。！？；：、""''（）【】《》]', '', collapsed)
        return cleaned.strip()

    def split_documents(self, documents: List[Dict[str, Any]]) -> List[Dict[str, Any]]:
        """Split documents into smaller text chunks.

        Accepts both LangChain ``Document`` objects (``page_content`` /
        ``metadata`` attributes) and plain dicts with the same keys, since
        ``load_documents`` may produce either form (the recipe branch emits
        dict records). Chunks shorter than MIN_TEXT_LENGTH after cleaning are
        discarded.

        Args:
            documents: documents to split (objects or dicts)
        Returns:
            List[Dict[str, Any]]: chunk records with text/source/page/chunk_index
        """
        print("[分割] 开始分割文档...")
        processed_chunks = []

        for doc in documents:
            # Normalize the two possible document shapes.
            if isinstance(doc, dict):
                content = doc.get("page_content", "")
                metadata = doc.get("metadata", {}) or {}
            else:
                content = doc.page_content
                metadata = doc.metadata

            cleaned_text = self.preprocess_text(content)
            if len(cleaned_text) < self.MIN_TEXT_LENGTH:
                continue

            chunks = self.text_splitter.split_text(cleaned_text)
            for chunk_idx, chunk in enumerate(chunks):
                if len(chunk.strip()) >= self.MIN_TEXT_LENGTH:
                    processed_chunks.append({
                        "text": chunk,
                        "source": metadata.get("source", "unknown"),
                        "page": metadata.get("page", 0),
                        "chunk_index": chunk_idx
                    })

        print(f"[分割] 文档分割完成，共生成 {len(processed_chunks)} 个chunk")
        return processed_chunks

    def vectorize_and_store(self, chunks: List[Dict[str, Any]]):
        """Embed chunks in batches and insert them into Milvus.

        No-op (with a message) when Milvus was not initialized.

        Args:
            chunks: chunk records produced by split_documents
        """
        if not self.milvus_collection:
            print("[向量化] Milvus未初始化，跳过向量存储")
            return

        print("[向量化] 开始向量化文档...")
        texts = [c["text"] for c in chunks]
        total = len(texts)
        vectors = []

        # Embed in fixed-size batches to keep API payloads bounded.
        for start in range(0, total, self.BATCH_SIZE):
            vectors.extend(self._embed_texts(texts[start:start + self.BATCH_SIZE]))
            print(f"[向量化] 已处理 {min(start + self.BATCH_SIZE, total)}/{total} 条")

        # Assemble the insert payload; text/source are truncated to the
        # collection's schema limits.
        rows = []
        for i, chunk in enumerate(chunks):
            rows.append({
                "text": chunk["text"][:10000],
                "embedding": vectors[i],
                "source": chunk["source"][:500],
                "chunk_index": chunk["chunk_index"]
            })

        print("[Milvus] 开始插入向量数据...")
        self.milvus_collection.insert(rows)
        self.milvus_collection.flush()
        print(f"[Milvus] 插入完成，共 {len(rows)} 条记录")

    def sync_to_elasticsearch(self, chunks: List[Dict[str, Any]]):
        """Index chunks into Elasticsearch for keyword retrieval.

        Skips (printing a one-time notice) when ES was not initialized.

        Args:
            chunks: chunk records produced by split_documents
        """
        if not self.es_client:
            # Only warn once per instance to avoid log spam.
            if not hasattr(self, '_es_sync_warning_shown'):
                print("[ES] Elasticsearch未初始化，跳过ES同步")
                self._es_sync_warning_shown = True
            return

        print("[ES] 开始同步数据到Elasticsearch...")
        for doc_id, chunk in enumerate(chunks):
            body = {
                "text": chunk["text"],
                "source": chunk["source"],
                "chunk_index": chunk["chunk_index"],
            }
            self.es_client.index(index=self.es_index_name, id=doc_id, document=body)
        # Refresh so the new documents are immediately searchable.
        self.es_client.indices.refresh(index=self.es_index_name)
        print(f"[ES] 同步完成，共 {len(chunks)} 条记录")

    def es_search(self, query: str, top_k: int = 5) -> List[Dict[str, Any]]:
        """Keyword retrieval via Elasticsearch.

        First tries a match_phrase query with the IK analyzer; if that fails
        (e.g. the IK plugin is not installed) it falls back to a plain match
        query.

        Args:
            query: query text
            top_k: maximum number of hits
        Returns:
            List[Dict[str, Any]]: hits with text/source/score/chunk_index;
            empty list when ES is unavailable or the search errors out.
        """
        if not self.es_client:
            # Only warn once per instance.
            if not hasattr(self, '_es_warning_shown'):
                print("[ES检索] Elasticsearch未初始化，跳过ES检索")
                self._es_warning_shown = True
            return []

        try:
            try:
                response = self.es_client.search(
                    index=self.es_index_name,
                    query={"match_phrase": {"text": {"query": query, "analyzer": "ik_max_word"}}},
                    size=top_k
                )
            except Exception:
                # Was a bare `except:`, which also swallowed SystemExit and
                # KeyboardInterrupt; narrowed to Exception.
                response = self.es_client.search(
                    index=self.es_index_name,
                    query={"match": {"text": query}},
                    size=top_k
                )

            return [{
                "text": hit["_source"]["text"],
                "source": hit["_source"]["source"],
                "score": hit["_score"],
                "chunk_index": hit["_source"]["chunk_index"]
            } for hit in response["hits"]["hits"]]
        except Exception as e:
            print(f"[ES检索] 错误: {e}")
            return []

    def semantic_search(self, query: str, top_k: int = 5) -> List[Dict[str, Any]]:
        """Vector-similarity retrieval via Milvus.

        Args:
            query: query text
            top_k: maximum number of hits
        Returns:
            List[Dict[str, Any]]: hits with text/source/score (L2 distance,
            smaller is better)/chunk_index; empty list on error or when
            Milvus is unavailable.
        """
        if not self.milvus_collection:
            print("[语义检索] Milvus未初始化")
            return []

        try:
            vector = self._embed_query(query)
            search_params = {"metric_type": "L2", "params": {"nprobe": 96}}
            raw = self.milvus_collection.search(
                data=[vector],
                anns_field="embedding",
                param=search_params,
                limit=top_k,
                output_fields=["text", "source", "chunk_index"]
            )

            hits = []
            for hit_group in raw:
                for hit in hit_group:
                    hits.append({
                        "text": hit.entity.get("text"),
                        "source": hit.entity.get("source"),
                        "score": hit.distance,
                        "chunk_index": hit.entity.get("chunk_index")
                    })
            return hits
        except Exception as e:
            print(f"[语义检索] 错误: {e}")
            return []

    def merge_and_deduplicate(self, es_results: List[Dict], semantic_results: List[Dict]) -> List[Dict[str, Any]]:
        """Merge keyword and semantic hits, tagging each with its origin.

        The deduplication key is the first 100 characters of the chunk text;
        ES hits are listed first, so they win ties against semantic
        duplicates.

        Args:
            es_results: hits from es_search
            semantic_results: hits from semantic_search
        Returns:
            List[Dict[str, Any]]: merged, de-duplicated hits
        """
        tagged = [dict(r, retrieval_type="es") for r in es_results]
        tagged += [dict(r, retrieval_type="semantic") for r in semantic_results]

        seen = set()
        deduped = []
        for hit in tagged:
            key = hit["text"][:100]
            if key in seen:
                continue
            seen.add(key)
            deduped.append(hit)
        return deduped

    def normalize_scores(self, results: List[Dict[str, Any]]) -> List[Dict[str, Any]]:
        """Normalize heterogeneous retrieval scores into [0, 1] and sort.

        ES scores are min-max scaled (higher is better). Semantic scores are
        L2 distances, so they are inverted after min-max scaling. Hits of
        unknown origin get a neutral 0.5. Mutates ``results`` in place and
        returns it, sorted by normalized score descending.

        Args:
            results: merged hit list
        Returns:
            List[Dict[str, Any]]: the same list, annotated and sorted
        """
        if not results:
            return results

        def _min_span(values):
            # Span of 1 avoids division by zero when all scores are equal.
            lo, hi = min(values), max(values)
            return lo, (hi - lo) if hi != lo else 1

        es_hits = [r for r in results if r.get("retrieval_type") == "es"]
        if es_hits:
            lo, span = _min_span([r["score"] for r in es_hits])
            for r in es_hits:
                r["normalized_score"] = (r["score"] - lo) / span

        sem_hits = [r for r in results if r.get("retrieval_type") == "semantic"]
        if sem_hits:
            lo, span = _min_span([r["score"] for r in sem_hits])
            for r in sem_hits:
                # Distance: smaller is better, so invert after scaling.
                r["normalized_score"] = 1 - ((r["score"] - lo) / span)

        for r in results:
            r.setdefault("normalized_score", 0.5)
        results.sort(key=lambda r: r.get("normalized_score", 0), reverse=True)
        return results

    def _generate_answer(self, question: str, contexts: List[str]) -> str:
        """Generate an answer with the LLM, grounded in retrieved contexts.

        Builds a Chinese prompt instructing the model to answer strictly from
        the supplied reference snippets, then calls DashScope Generation.

        Args:
            question: user question
            contexts: retrieved context texts
        Returns:
            str: the generated answer, or an error description on failure
            (this method never raises — all errors are folded into the
            returned string)
        """
        # Number the snippets so the model can reference them.
        context_text = "\n\n".join([f"片段{i+1}:\n{ctx}" for i, ctx in enumerate(contexts)])
        prompt = f"""
你是专业问答助手，需严格按以下规则回答：
1. 从参考内容中提取所有与问题相关的关键信息（如步骤、特点、定义），不遗漏任何要点。
2. 答案结构需与参考内容的逻辑一致（如步骤按顺序，特点分点）。
3. 长度尽量接近参考内容的详细程度（避免过简或冗余）。
4. 仅使用参考内容中的术语，不编造信息。
5. 答案长度建议为参考内容的 80%-120%。

【参考内容】：
{context_text}

【用户问题】：
{question}
""".strip()

        try:
            resp = Generation.call(
                model=self.llm_model,
                messages=[{"role": "user", "content": prompt}],
                result_format='message'
            )

            if resp.status_code == 200:
                # The response payload shape varies across dashscope versions;
                # probe the known layouts in order (order matters here).
                output = resp.output
                if hasattr(output, 'choices') and output.choices:
                    return output.choices[0].message.content
                elif hasattr(output, 'text'):
                    return output.text
                elif isinstance(output, str):
                    return output
                return str(output) if output else "无法解析响应"
            raise Exception(f"生成答案失败: {resp.message}")
        except Exception as e:
            return f"生成答案时出错: {e}"

    def query(self, question: str, top_k: int = 5, verbose: bool = True) -> Dict[str, Any]:
        """Run the full RAG pipeline: retrieve, fuse, rank, generate.

        Args:
            question: user question
            top_k: number of hits requested from each retriever
            verbose: whether to print progress information
        Returns:
            Dict[str, Any]: question, answer, top-5 contexts and total
            context count
        """
        if verbose:
            print(f"\n[查询] 问题: {question}")

        # Dual retrieval: keyword (ES) + vector (Milvus).
        keyword_hits = self.es_search(question, top_k)
        vector_hits = self.semantic_search(question, top_k)
        if verbose:
            print(f"[查询] ES检索到 {len(keyword_hits)} 条，语义检索到 {len(vector_hits)} 条")

        # Fuse, normalize scores, then keep at most 4 contexts whose
        # normalized score exceeds 0.7.
        fused = self.merge_and_deduplicate(keyword_hits, vector_hits)
        ranked = self.normalize_scores(fused)
        best = [r for r in ranked if r.get("normalized_score", 0) > 0.7][:4]

        if verbose:
            print(f"[查询] 合并后 {len(fused)} 条，高质量 {len(best)} 条")
            print("[查询] 生成答案...")

        answer = self._generate_answer(question, [r["text"] for r in best])

        return {
            "question": question,
            "answer": answer,
            "contexts": ranked[:5],
            "total_contexts": len(ranked)
        }

    def evaluate_answer_vs_contexts(self, generated_answer: str, contexts: List[Dict[str, Any]], top_k: int = 5) -> Dict[str, Any]:
        """Score a generated answer against retrieved contexts (no gold labels).

        Embeds the cleaned answer and each context text, then reports cosine
        similarities: per-context details plus max / mean / top-K mean.

        Args:
            generated_answer: the LLM-generated answer
            contexts: retrieval results (dicts containing "text")
            top_k: K for the top-K mean
        Returns:
            Dict[str, Any]: metrics dict; on failure, zeroed metrics plus an
            "error" message
        """
        try:
            if not generated_answer or not contexts:
                return {"max_cosine": 0.0, "mean_cosine": 0.0, "topk_mean_cosine": 0.0, "details": []}

            answer_vec = np.array(self._embed_query(self.preprocess_text(generated_answer)))

            def _cosine(vec_a, vec_b):
                # Guard against zero-norm vectors.
                na, nb = np.linalg.norm(vec_a), np.linalg.norm(vec_b)
                if not (na and nb):
                    return 0.0
                return float(np.dot(vec_a, vec_b) / (na * nb))

            sims = []
            for ctx in contexts:
                cleaned = self.preprocess_text(ctx.get("text", "") or "")
                if not cleaned:
                    sims.append(0.0)
                else:
                    sims.append(_cosine(answer_vec, np.array(self._embed_query(cleaned))))

            ranked = sorted(sims, reverse=True)
            k = min(top_k, len(ranked))
            return {
                "max_cosine": float(ranked[0]) if ranked else 0.0,
                "mean_cosine": float(sum(sims) / len(sims)) if sims else 0.0,
                "topk_mean_cosine": float(sum(ranked[:k]) / k) if k > 0 else 0.0,
                "details": [{"text": contexts[i].get("text", ""), "cosine": float(sims[i])} for i in range(len(sims))]
            }
        except Exception as e:
            return {"max_cosine": 0.0, "mean_cosine": 0.0, "topk_mean_cosine": 0.0, "error": str(e), "details": []}

    # The systematic offline-evaluation methods were removed.


# FastAPI路由
from fastapi import APIRouter
from pydantic import BaseModel

# Module-level FastAPI router plus the lazily-created singleton returned by
# get_rag_system().
router = APIRouter()
rag_system = None


def get_rag_system():
    """Return the process-wide RAGSystem singleton, creating it on first use.

    Returns:
        RAGSystem: the shared instance
    """
    global rag_system
    if rag_system is not None:
        return rag_system
    rag_system = RAGSystem()
    return rag_system


class QueryRequest(BaseModel):
    """Request body for the /query endpoint."""
    question: str  # user question in natural language


class QueryResponse(BaseModel):
    """Response body for the /query endpoint (mirrors RAGSystem.query)."""
    question: str
    answer: str
    contexts: List[Dict[str, Any]]  # top-5 ranked contexts with scores
    total_contexts: int             # total merged contexts considered


@router.post("/query", response_model=QueryResponse)
async def query_rag(request: QueryRequest):
    """RAG query endpoint.

    Runs the full retrieve-and-generate pipeline on the question and returns
    the answer together with its supporting contexts.

    Args:
        request: query request containing the question
    Returns:
        QueryResponse: answer plus ranked contexts
    """
    rag = get_rag_system()
    return rag.query(request.question)


if __name__ == "__main__":
    print("=" * 50)
    print("RAG系统测试")
    print("=" * 50)

    # 初始化RAG系统
    print("\n[初始化] 正在初始化RAG系统...")
    try:
        rag = RAGSystem(api_key="sk-2c036d80d14a443f8769e072bb09a2e1")
        print("[初始化] RAG系统初始化成功\n")
    except Exception as e:
        print(f"[初始化] 失败: {e}")
        exit(1)

    # 1. 加载PDF文档
    print("=" * 50)
    print("步骤1: 加载PDF文档")
    print("=" * 50)
    pdf_paths = ["菜谱.pdf", "redbook.pdf"]
    documents = rag.load_documents(pdf_paths)
    print(f"共加载 {len(documents)} 个文档\n")

    # 2. 分割文档
    print("=" * 50)
    print("步骤2: 分割文档")
    print("=" * 50)
    chunks = rag.split_documents(documents)
    print(f"共生成 {len(chunks)} 个文档块\n")

    # 3. 向量化并存储
    print("=" * 50)
    print("步骤3: 向量化并存储到Milvus")
    print("=" * 50)
    try:
        rag.vectorize_and_store(chunks)
        print("向量化存储完成\n")
    except Exception as e:
        print(f"向量化存储失败: {e}\n")

    # 4. 同步到Elasticsearch
    print("=" * 50)
    print("步骤4: 同步到Elasticsearch")
    print("=" * 50)
    try:
        rag.sync_to_elasticsearch(chunks)
        print("ES同步完成\n")
    except Exception as e:
        print(f"ES同步失败: {e}\n")

    # 5. 测试查询
    print("=" * 50)
    print("步骤5: 测试查询")
    print("=" * 50)
    for question in ["如何制作鱼香茄子？", "小红书的特点是什么？"]:
        print("\n" + "-" * 50)
        result = rag.query(question)
        print(f"\n问题: {result['question']}")
        print(f"答案: {result['answer']}")
        print(f"上下文数量: {result['total_contexts']}")
        print("-" * 50)

    # 原“步骤6: 评估RAG系统”已移除

    print("\n" + "=" * 50)
    print("测试完成")
    print("=" * 50)

    # 7. 交互式问答与相似度评估（无标注场景）
    print("\n" + "=" * 50)
    print("步骤7: 交互模式（输入问题，回车发送；输入 exit 退出）")
    print("=" * 50)
    while True:
        try:
            user_q = input("\n问题> ").strip()
        except EOFError:
            break
        if not user_q or user_q.lower() in ("exit", "quit", ":q"):
            break
        qr = rag.query(user_q, verbose=True)
        print("\n—— 相似度评估（答案 vs 检索chunk）——")
        metrics = rag.evaluate_answer_vs_contexts(qr["answer"], qr["contexts"], top_k=5)
        if "error" in metrics and metrics["error"]:
            print(f"[评估错误] {metrics['error']}")
        print(f"max_cosine: {metrics['max_cosine']:.3f} | mean_cosine: {metrics['mean_cosine']:.3f} | top5_mean: {metrics['topk_mean_cosine']:.3f}")
        for i, d in enumerate(sorted(metrics["details"], key=lambda x: x["cosine"], reverse=True), 1):
            preview = (d['text'][:80] + "…") if len(d['text']) > 80 else d['text']
            print(f"  [{i}] cosine={d['cosine']:.3f} | {preview}")
