"""
RAG系统实现
使用LangChain、Milvus、Elasticsearch和通义千问构建检索增强生成系统
"""

import os
import sys
import re
from typing import List, Dict, Any
from pathlib import Path

from langchain_community.llms.tongyi import Tongyi

# Add the project root to sys.path (only needed when this file is run
# directly as a script, so that the `config` package can be imported).
if __name__ == "__main__":
    project_root = Path(__file__).parent.parent
    sys.path.insert(0, str(project_root))

# LangChain相关
from langchain_community.document_loaders import PyPDFLoader
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain_community.embeddings import OllamaEmbeddings, DashScopeEmbeddings
from langchain_community.llms import Ollama

# Milvus相关
from pymilvus import (
    connections,
    utility,
    FieldSchema,
    CollectionSchema,
    DataType,
    Collection,
)

# Elasticsearch相关
from elasticsearch import Elasticsearch
from elasticsearch_dsl import Document, Text, Keyword, Index

# Configuration: prefer the project-level `config.settings` module.
try:
    from config import settings
except ImportError:
    # Fallback when `config` cannot be imported (e.g. running this file
    # standalone): read service endpoints from environment variables.
    class Settings:
        """Minimal stand-in for config.settings backed by environment variables."""
        MILVUS_HOST = os.getenv("MILVUS_HOST", "localhost")
        MILVUS_PORT = int(os.getenv("MILVUS_PORT", "19530"))
        MILVUS_COLLECTION_NAME = os.getenv("MILVUS_COLLECTION_NAME", "rag_documents")
        ES_HOST = os.getenv("ES_HOST", "localhost")
        ES_PORT = int(os.getenv("ES_PORT", "9200"))
        ES_INDEX_NAME = os.getenv("ES_INDEX_NAME", "rag_documents")
    settings = Settings()


class RAGSystem:
    """Hybrid retrieval-augmented generation system.

    Combines Milvus vector search (semantic retrieval), Elasticsearch
    (keyword retrieval), DashScope embeddings, and the Tongyi (Qwen) LLM.

    Pipeline: load PDFs -> clean/split text -> embed and store in Milvus ->
    mirror text into Elasticsearch -> at query time run both retrievers,
    merge/deduplicate/normalize the hits, then generate an answer.
    """

    def __init__(self):
        """Initialize models and backend connections.

        Milvus/Elasticsearch connection failures are logged and leave the
        corresponding handle as ``None`` (degraded mode) instead of raising,
        so the rest of the system stays usable.
        """
        # Security fix: the API key used to be hard-coded in source control.
        # Read it from the environment instead; callers must export
        # DASHSCOPE_API_KEY before constructing the system.
        self.dashscope_api_key = os.getenv("DASHSCOPE_API_KEY", "")
        if not self.dashscope_api_key:
            print("[警告] 未设置环境变量 DASHSCOPE_API_KEY，embedding 和 LLM 调用将会失败")

        # Backend handles default to None so every method can degrade
        # gracefully even if initialization fails partway through.
        self.milvus_collection = None
        self.es_client = None

        # Text splitter tuned so related content stays together: large
        # chunks for context, generous overlap for continuity across chunks.
        self.text_splitter = RecursiveCharacterTextSplitter(
            chunk_size=1000,
            chunk_overlap=200,
            length_function=len,
            separators=["\n\n", "\n", "。", "！", "？", "；", "，", ".", "!", "?", ";", ",", " ", ""]
        )

        # Embedding model (Alibaba Cloud DashScope, text-embedding-v4).
        self.embeddings = DashScopeEmbeddings(
            model="text-embedding-v4",
            dashscope_api_key=self.dashscope_api_key
        )

        # Language model (Alibaba Cloud Tongyi / Qwen).
        # "qwen-turbo" or "qwen-max" are drop-in alternatives.
        self.llm = Tongyi(
            model_name="qwen-plus",
            dashscope_api_key=self.dashscope_api_key
        )

        # Connection settings: environment variables win, then `settings`,
        # then hard-coded defaults.
        self.milvus_collection_name = os.getenv("MILVUS_COLLECTION_NAME") or getattr(settings, "MILVUS_COLLECTION_NAME", "rag_documents")
        self.milvus_host = os.getenv("MILVUS_HOST") or getattr(settings, "MILVUS_HOST", "localhost")
        self.milvus_port = int(os.getenv("MILVUS_PORT") or getattr(settings, "MILVUS_PORT", "19530"))
        self.es_index_name = os.getenv("ES_INDEX_NAME") or getattr(settings, "ES_INDEX_NAME", "rag_documents")
        self.es_host = os.getenv("ES_HOST") or getattr(settings, "ES_HOST", "localhost")
        self.es_port = int(os.getenv("ES_PORT") or getattr(settings, "ES_PORT", "9200"))

        # Connect to the vector store and the keyword index.
        self._init_milvus()
        self._init_elasticsearch()

    def _init_milvus(self):
        """Connect to Milvus and ensure a collection with the correct vector
        dimension exists and is loaded into memory.

        An existing collection whose vector dimension does not match the
        configured embedding model is dropped and recreated. On any failure
        ``self.milvus_collection`` stays ``None`` so vector search is simply
        skipped elsewhere.
        """
        try:
            connections.connect(
                alias="default",
                host=self.milvus_host,
                port=self.milvus_port
            )
            print(f"[Milvus] 已连接到 {self.milvus_host}:{self.milvus_port}")

            # Detect the embedding dimension dynamically by embedding a probe
            # string, so the schema always matches the configured model.
            test_embedding = self.embeddings.embed_query("test")
            embedding_dim = len(test_embedding)
            print(f"[Milvus] 检测到向量维度: {embedding_dim}")

            if utility.has_collection(self.milvus_collection_name):
                try:
                    existing_collection = Collection(self.milvus_collection_name)
                    # Read the vector field's dimension from the schema
                    # (the collection does not need to be loaded for this).
                    existing_dim = None
                    for field in existing_collection.schema.fields:
                        if field.name == "embedding":
                            existing_dim = field.params.get("dim")
                            break

                    if existing_dim == embedding_dim:
                        # Dimensions match: reuse the existing collection.
                        print(f"[Milvus] 检测到已存在的集合 '{self.milvus_collection_name}'，维度匹配({embedding_dim})，直接使用")
                        self.milvus_collection = existing_collection
                        self.milvus_collection.load()
                        print("[Milvus] 集合已加载到内存")
                        return
                    if existing_dim is None:
                        print(f"[Milvus] 无法获取现有集合的维度，删除并重新创建...")
                        utility.drop_collection(self.milvus_collection_name)
                    else:
                        print(f"[Milvus] 检测到已存在的集合 '{self.milvus_collection_name}'，维度不匹配（现有: {existing_dim}, 需要: {embedding_dim}），正在删除...")
                        utility.drop_collection(self.milvus_collection_name)
                        print(f"[Milvus] 已删除旧集合 '{self.milvus_collection_name}'")
                except Exception as e:
                    print(f"[Milvus] 检查现有集合时出错: {e}，删除并重新创建...")
                    try:
                        utility.drop_collection(self.milvus_collection_name)
                    except Exception:
                        # Best effort; the explicit check below reports the
                        # failure instead of crashing with AttributeError.
                        pass

            if utility.has_collection(self.milvus_collection_name):
                # Bug fix: the original code could reach the final load()
                # with self.milvus_collection never assigned when a drop
                # failed. Fail explicitly so the outer handler reports it.
                raise RuntimeError(
                    f"无法删除旧集合 '{self.milvus_collection_name}'"
                )

            # Create a fresh collection matching the detected dimension.
            fields = [
                FieldSchema(name="id", dtype=DataType.INT64, is_primary=True, auto_id=True),
                FieldSchema(name="text", dtype=DataType.VARCHAR, max_length=10000),
                FieldSchema(name="embedding", dtype=DataType.FLOAT_VECTOR, dim=embedding_dim),
                FieldSchema(name="source", dtype=DataType.VARCHAR, max_length=500),
                FieldSchema(name="chunk_index", dtype=DataType.INT64),
            ]
            schema = CollectionSchema(fields=fields, description="RAG文档向量集合")
            self.milvus_collection = Collection(
                name=self.milvus_collection_name,
                schema=schema
            )
            # IVF_FLAT with L2 distance; nlist is the number of clusters,
            # probed at search time via nprobe (see semantic_search).
            index_params = {
                "metric_type": "L2",
                "index_type": "IVF_FLAT",
                "params": {"nlist": 1024}
            }
            self.milvus_collection.create_index(
                field_name="embedding",
                index_params=index_params
            )
            print(f"[Milvus] 集合 '{self.milvus_collection_name}' 创建成功，维度: {embedding_dim}")

            # Load the collection into memory so it is searchable.
            self.milvus_collection.load()
            print("[Milvus] 集合已加载到内存")

        except Exception as e:
            print(f"[Milvus] 初始化失败: {e}")
            print("[Milvus] 提示: 请确保Milvus服务已启动，或设置环境变量 MILVUS_HOST 和 MILVUS_PORT")
            import traceback
            traceback.print_exc()
            # Degrade gracefully: vector search is disabled but keyword
            # search and generation can still run.
            self.milvus_collection = None

    def _init_elasticsearch(self):
        """Connect to Elasticsearch and create the document index if missing.

        On failure ``self.es_client`` stays ``None`` and keyword search is
        skipped elsewhere.
        """
        try:
            self.es_client = Elasticsearch(
                [f"http://{self.es_host}:{self.es_port}"],
                request_timeout=30
            )

            # ping() returns False (it does not raise) when unreachable.
            if not self.es_client.ping():
                raise Exception("无法连接到Elasticsearch")

            print(f"[ES] 已连接到 {self.es_host}:{self.es_port}")

            # Create the index if it does not exist yet. Uses the standard
            # analyzer (no IK Chinese tokenizer dependency).
            if not self.es_client.indices.exists(index=self.es_index_name):
                mapping = {
                    "mappings": {
                        "properties": {
                            "text": {"type": "text"},
                            "source": {"type": "keyword"},
                            "chunk_index": {"type": "integer"},
                        }
                    }
                }
                self.es_client.indices.create(index=self.es_index_name, body=mapping)
                print(f"[ES] 索引 '{self.es_index_name}' 创建成功")
            else:
                print(f"[ES] 索引 '{self.es_index_name}' 已存在")

        except Exception as e:
            print(f"[ES] 初始化失败: {e}")
            print("[ES] 提示: 请确保Elasticsearch服务已启动，或设置环境变量 ES_HOST 和 ES_PORT")
            # Degrade gracefully rather than aborting initialization.
            self.es_client = None

    def load_documents(self, pdf_paths: List[str]) -> List[Dict[str, Any]]:
        """Load PDF documents with PyPDFLoader.

        Args:
            pdf_paths: PDF file paths; relative paths are resolved against
                this module's directory.

        Returns:
            A flat list of loaded page documents (LangChain ``Document``
            objects with ``page_content`` and ``metadata``). Missing or
            unreadable files are skipped with a warning.
        """
        all_documents = []

        for pdf_path in pdf_paths:
            # Resolve relative paths against this file's directory so the
            # script works regardless of the current working directory.
            if not os.path.isabs(pdf_path):
                pdf_path = os.path.join(os.path.dirname(__file__), pdf_path)

            # (Removed a no-op encode('utf-8').decode('utf-8') round-trip
            # that did nothing for non-ASCII paths.)
            if not os.path.exists(pdf_path):
                print(f"[警告] 文件不存在: {pdf_path}")
                continue

            print(f"[加载] 正在加载: {pdf_path}")
            try:
                loader = PyPDFLoader(pdf_path)
                documents = loader.load()

                # Record the originating file on every page for provenance.
                for doc in documents:
                    doc.metadata["source"] = pdf_path

                all_documents.extend(documents)
                print(f"[加载] 成功加载 {len(documents)} 页")

            except Exception as e:
                print(f"[加载] 加载失败 {pdf_path}: {e}")
                continue

        return all_documents

    def preprocess_text(self, text: str) -> str:
        """Clean raw extracted text.

        Collapses runs of spaces/tabs, limits blank lines to one, strips
        non-printable control characters, and trims the result.

        Args:
            text: Raw text (``bytes`` input is decoded as UTF-8 first).

        Returns:
            The cleaned text.
        """
        # Tolerate bytes input from upstream loaders.
        if isinstance(text, bytes):
            text = text.decode('utf-8')

        # Collapse horizontal whitespace but keep paragraph structure:
        # runs of spaces/tabs become one space; 3+ newlines become 2.
        text = re.sub(r'[ \t]+', ' ', text)
        text = re.sub(r'\n{3,}', '\n\n', text)

        # Strip control characters (keep \n, \t already handled above);
        # CJK, Latin, digits, and punctuation are preserved.
        text = re.sub(r'[\x00-\x08\x0b\x0c\x0e-\x1f\x7f-\x9f]', '', text)

        return text.strip()

    def split_documents(self, documents: List[Dict[str, Any]]) -> List[Dict[str, Any]]:
        """Clean and split documents into chunks.

        Args:
            documents: Loaded page documents (see :meth:`load_documents`).

        Returns:
            Chunk dicts with keys ``text``, ``source``, ``page`` and
            ``chunk_index``. Texts/chunks shorter than 10 chars are dropped.
        """
        print("[分割] 开始分割文档...")

        processed_chunks = []
        for idx, doc in enumerate(documents):
            cleaned_text = self.preprocess_text(doc.page_content)

            # Skip pages that are effectively empty after cleaning.
            if len(cleaned_text) < 10:
                continue

            chunks = self.text_splitter.split_text(cleaned_text)

            for chunk_idx, chunk in enumerate(chunks):
                # Skip fragments too short to be meaningful context.
                if len(chunk.strip()) < 10:
                    continue

                processed_chunks.append({
                    "text": chunk,
                    "source": doc.metadata.get("source", "unknown"),
                    "page": doc.metadata.get("page", 0),
                    "chunk_index": chunk_idx
                })

        print(f"[分割] 文档分割完成，共生成 {len(processed_chunks)} 个chunk")
        return processed_chunks

    def vectorize_and_store(self, chunks: List[Dict[str, Any]]):
        """Embed chunks and insert them into the Milvus collection.

        No-op (with a message) when Milvus is not initialized.

        Args:
            chunks: Chunk dicts produced by :meth:`split_documents`.
        """
        if self.milvus_collection is None:
            print("[向量化] Milvus未初始化，跳过向量存储")
            return

        print("[向量化] 开始向量化文档...")

        # Embed in batches of 10: DashScope caps each request at 10 texts.
        texts = [chunk["text"] for chunk in chunks]
        batch_size = 10
        embeddings = []

        for i in range(0, len(texts), batch_size):
            batch_texts = texts[i:i + batch_size]
            batch_embeddings = self.embeddings.embed_documents(batch_texts)
            embeddings.extend(batch_embeddings)
            print(f"[向量化] 已处理 {min(i + batch_size, len(texts))}/{len(texts)} 条")

        print(f"[向量化] 向量化完成，共 {len(embeddings)} 个向量")

        if len(embeddings) > 0:
            print(f"[向量化] 第一个向量的维度: {len(embeddings[0])}")

        # Build Milvus rows; truncate text/source to the VARCHAR limits
        # declared in the collection schema. The vector dimension already
        # matches the collection because _init_milvus detected it from the
        # same embedding model.
        milvus_data = []
        for i, chunk in enumerate(chunks):
            if i < len(embeddings):
                milvus_data.append({
                    "text": chunk["text"][:10000],
                    "embedding": embeddings[i],
                    "source": chunk["source"][:500],
                    "chunk_index": chunk["chunk_index"]
                })

        print("[Milvus] 开始插入向量数据...")
        if milvus_data:
            insert_result = self.milvus_collection.insert(milvus_data)
            # flush() makes the inserted rows durable and searchable.
            self.milvus_collection.flush()
            print(f"[Milvus] 插入完成，共 {len(insert_result.primary_keys)} 条记录")
        else:
            print("[Milvus] 没有数据需要插入")

    def sync_to_elasticsearch(self, chunks: List[Dict[str, Any]]):
        """Mirror chunk texts into the Elasticsearch index.

        Uses the list position as the document id, so re-running overwrites
        previous documents instead of duplicating them. No-op when ES is not
        initialized.

        Args:
            chunks: Chunk dicts produced by :meth:`split_documents`.
        """
        if self.es_client is None:
            print("[ES] Elasticsearch未初始化，跳过ES同步")
            return

        print("[ES] 开始同步数据到Elasticsearch...")

        for idx, chunk in enumerate(chunks):
            doc_body = {
                "text": chunk["text"],
                "source": chunk["source"],
                "chunk_index": chunk["chunk_index"]
            }

            self.es_client.index(
                index=self.es_index_name,
                id=idx,
                body=doc_body
            )

        # Refresh so the documents are immediately visible to searches.
        self.es_client.indices.refresh(index=self.es_index_name)
        print(f"[ES] 同步完成，共 {len(chunks)} 条记录")

    def es_search(self, query: str, top_k: int = 5) -> List[Dict[str, Any]]:
        """Keyword retrieval via Elasticsearch.

        Args:
            query: The user query.
            top_k: Maximum number of hits to return.

        Returns:
            Hit dicts with ``text``, ``source``, ``score`` (BM25, higher is
            better) and ``chunk_index``. Empty list when ES is unavailable
            or the search fails.
        """
        if self.es_client is None:
            print("[ES检索] Elasticsearch未初始化")
            return []

        try:
            # bool/should combines a fuzzy term match with a boosted exact
            # phrase match so phrase hits rank higher.
            search_body = {
                "query": {
                    "bool": {
                        "should": [
                            {
                                "match": {
                                    "text": {
                                        "query": query,
                                        "boost": 2.0
                                    }
                                }
                            },
                            {
                                "match_phrase": {
                                    "text": {
                                        "query": query,
                                        "boost": 3.0
                                    }
                                }
                            }
                        ]
                    }
                },
                "size": top_k
            }

            response = self.es_client.search(
                index=self.es_index_name,
                body=search_body
            )

            results = []
            for hit in response["hits"]["hits"]:
                results.append({
                    "text": hit["_source"]["text"],
                    "source": hit["_source"]["source"],
                    "score": hit["_score"],
                    "chunk_index": hit["_source"]["chunk_index"]
                })

            print(f"[ES检索] 检索到 {len(results)} 条结果")
            return results

        except Exception as e:
            print(f"[ES检索] 错误: {e}")
            return []

    def semantic_search(self, query: str, top_k: int = 5) -> List[Dict[str, Any]]:
        """Semantic retrieval via Milvus vector search.

        Args:
            query: The user query.
            top_k: Requested result count. Note: up to ``2 * top_k`` hits are
                fetched and returned deliberately to improve recall before
                the merge/dedup stage trims them.

        Returns:
            Hit dicts with ``text``, ``source``, ``score`` (L2 distance,
            lower is more similar) and ``chunk_index``. Empty list when
            Milvus is unavailable or the search fails.
        """
        if self.milvus_collection is None:
            print("[语义检索] Milvus未初始化")
            return []

        try:
            query_embedding = self.embeddings.embed_query(query)
            print(f"[语义检索] 查询向量化完成，维度: {len(query_embedding)}")

            # nprobe=16 probes more IVF clusters for better accuracy at a
            # modest latency cost.
            search_params = {"metric_type": "L2", "params": {"nprobe": 16}}

            results = self.milvus_collection.search(
                data=[query_embedding],
                anns_field="embedding",
                param=search_params,
                limit=top_k * 2,
                output_fields=["text", "source", "chunk_index"]
            )

            # One hit list per query vector; we only ever send one vector.
            semantic_results = []
            for hit_list in results:
                for hit in hit_list:
                    semantic_results.append({
                        "text": hit.entity.get("text"),
                        "source": hit.entity.get("source"),
                        "score": hit.distance,
                        "chunk_index": hit.entity.get("chunk_index")
                    })

            print(f"[语义检索] 检索到 {len(semantic_results)} 条结果")
            return semantic_results

        except Exception as e:
            print(f"[语义检索] 错误: {e}")
            import traceback
            traceback.print_exc()
            return []

    def merge_and_deduplicate(self, es_results: List[Dict], semantic_results: List[Dict]) -> List[Dict[str, Any]]:
        """Merge keyword and semantic hits and drop near-duplicates.

        Each result is tagged with ``retrieval_type`` ("es" or "semantic").
        Results are sorted by raw score (descending) before deduplication,
        so for duplicates the higher-raw-score entry wins. NOTE(review):
        raw ES scores and L2 distances are not on the same scale — this
        ordering only matters for which duplicate survives; the final
        ranking happens in :meth:`normalize_scores`.

        Args:
            es_results: Hits from :meth:`es_search`.
            semantic_results: Hits from :meth:`semantic_search`.

        Returns:
            Merged, deduplicated result list.
        """
        seen_texts = set()
        merged_results = []

        all_results = []

        # Tag keyword hits.
        for result in es_results:
            all_results.append({
                **result,
                "retrieval_type": "es"
            })

        # Tag semantic hits.
        for result in semantic_results:
            all_results.append({
                **result,
                "retrieval_type": "semantic"
            })

        # Sort by raw score so the higher-scored duplicate is kept.
        all_results.sort(key=lambda x: x.get("score", 0), reverse=True)

        # Deduplicate on the first 200 characters of the text, which is
        # long enough to distinguish chunks that merely share a prefix.
        for result in all_results:
            text_key = result["text"][:200].strip()
            if text_key not in seen_texts and len(text_key) > 10:
                seen_texts.add(text_key)
                merged_results.append(result)

        print(f"[合并去重] 合并前: {len(all_results)} 条, 合并后: {len(merged_results)} 条")
        return merged_results

    def normalize_scores(self, results: List[Dict[str, Any]]) -> List[Dict[str, Any]]:
        """Min-max normalize scores per retrieval type and sort by them.

        ES scores (higher is better) map directly onto [0, 1]; L2 distances
        (lower is better) are inverted so 1 is best. Results without a
        recognized type get a neutral 0.5. Mutates and returns ``results``.

        Args:
            results: Merged result list (see :meth:`merge_and_deduplicate`).

        Returns:
            The same list with ``normalized_score`` added, sorted descending.
        """
        if not results:
            return results

        es_scores = [r["score"] for r in results if r.get("retrieval_type") == "es"]
        semantic_scores = [r["score"] for r in results if r.get("retrieval_type") == "semantic"]

        # ES: min-max to [0, 1], larger raw score -> larger normalized score.
        if es_scores:
            es_max = max(es_scores)
            es_min = min(es_scores)
            # Guard against division by zero when all scores are equal.
            es_range = es_max - es_min if es_max != es_min else 1

            for result in results:
                if result.get("retrieval_type") == "es":
                    result["normalized_score"] = (result["score"] - es_min) / es_range

        # Semantic: min-max then invert, since a smaller L2 distance means
        # a better match.
        if semantic_scores:
            semantic_max = max(semantic_scores)
            semantic_min = min(semantic_scores)
            semantic_range = semantic_max - semantic_min if semantic_max != semantic_min else 1

            for result in results:
                if result.get("retrieval_type") == "semantic":
                    result["normalized_score"] = 1 - ((result["score"] - semantic_min) / semantic_range)

        # Anything untagged gets a neutral default so sorting is total.
        for result in results:
            if "normalized_score" not in result:
                result["normalized_score"] = 0.5

        results.sort(key=lambda x: x.get("normalized_score", 0), reverse=True)

        return results

    def _generate_answer(self, question: str, contexts: List[str]) -> str:
        """Generate an answer with the Tongyi LLM from retrieved contexts.

        Args:
            question: The user question.
            contexts: Retrieved context texts, best first.

        Returns:
            The generated answer, or an error message string on failure.
        """
        # Number the context fragments so the prompt can reference them.
        context_text = "\n\n".join([f"片段{i+1}:\n{ctx}" for i, ctx in enumerate(contexts)])
        prompt = f"""
你是一个小红书百科小助手，记录菜谱、手工制作等知识点，请基于提供的文档内容回答用户问题。

【参考内容】：
{context_text}

【用户问题】：
{question}

【回答要求】：
1. 严格基于参考内容回答，不要编造或推测
2. 如果参考内容中没有相关信息，请明确说明"参考内容中未提及此信息"
3. 回答应简洁明了，条理清晰
4. 如有多个要点，请分点列出
5. 如果涉及具体制作步骤，可引用相关内容
6. 回答完成后，可以提供一些相关的制作建议
""".strip()

        try:
            # Invoke the Tongyi LLM (the old comment incorrectly said Ollama).
            response = self.llm.invoke(prompt)
            return response
        except Exception as e:
            return f"生成答案时出错: {e}"

    def query(self, question: str, top_k: int = 5, verbose: bool = True) -> Dict[str, Any]:
        """Run the full query pipeline: retrieve, merge, rank, generate.

        Args:
            question: The user question (``bytes`` is decoded as UTF-8).
            top_k: Base retrieval count; each retriever is asked for
                ``2 * top_k`` hits to improve recall before deduplication.
            verbose: Print progress messages.

        Returns:
            Dict with ``question``, ``answer``, ``contexts`` (top 5 ranked
            hits) and ``total_contexts``.
        """
        # Tolerate bytes input from callers.
        if isinstance(question, bytes):
            question = question.decode('utf-8')

        if verbose:
            print(f"\n[查询] 问题: {question}")

        # 1. Keyword retrieval (over-fetch for recall).
        if verbose:
            print("[查询] 执行ES检索...")
        es_results = self.es_search(question, top_k * 2)
        if verbose:
            print(f"[查询] ES检索到 {len(es_results)} 条结果")

        # 2. Semantic retrieval (over-fetch for recall).
        if verbose:
            print("[查询] 执行语义检索...")
        semantic_results = self.semantic_search(question, top_k * 2)
        if verbose:
            print(f"[查询] 语义检索到 {len(semantic_results)} 条结果")

        # 3. Merge and deduplicate the two result sets.
        if verbose:
            print("[查询] 合并去重...")
        merged_results = self.merge_and_deduplicate(es_results, semantic_results)
        if verbose:
            print(f"[查询] 合并后剩余 {len(merged_results)} 条结果")

        # 4. Normalize scores so both retrievers rank on a common scale.
        if verbose:
            print("[查询] 归一化分数...")
        normalized_results = self.normalize_scores(merged_results)

        # 5. Generate the final answer from the ranked contexts.
        if verbose:
            print("[查询] 生成答案...")
        answer = self._generate_answer(question, [r["text"] for r in normalized_results])

        return {
            "question": question,
            "answer": answer,
            "contexts": normalized_results[:5],
            "total_contexts": len(normalized_results)
        }

    def evaluate_retrieval(self, question: str, relevant_texts: List[str], top_k: int = 5) -> Dict[str, float]:
        """Evaluate retrieval quality against a set of known-relevant texts.

        Matching is exact on the lowercased, stripped first 100 characters
        of each text — a crude proxy for relevance.

        Args:
            question: The query.
            relevant_texts: Ground-truth relevant document texts.
            top_k: How many retrieved results to evaluate.

        Returns:
            Dict with ``precision@k``, ``recall@k``, ``f1_score``, ``mrr``
            and the raw set sizes.
        """
        # Run the same retrieval pipeline as query(), minus generation.
        es_results = self.es_search(question, top_k=top_k)
        semantic_results = self.semantic_search(question, top_k=top_k)
        merged_results = self.merge_and_deduplicate(es_results, semantic_results)
        normalized_results = self.normalize_scores(merged_results)

        retrieved_texts = [r["text"] for r in normalized_results[:top_k]]

        # Normalize both sides to a comparable key (first 100 chars).
        relevant_set = set([text.lower().strip()[:100] for text in relevant_texts])
        retrieved_set = set([text.lower().strip()[:100] for text in retrieved_texts])

        # Precision@K: fraction of retrieved docs that are relevant.
        intersection = relevant_set.intersection(retrieved_set)
        precision = len(intersection) / len(retrieved_set) if retrieved_set else 0.0

        # Recall@K: fraction of relevant docs that were retrieved.
        recall = len(intersection) / len(relevant_set) if relevant_set else 0.0

        # F1: harmonic mean of precision and recall.
        f1 = 2 * precision * recall / (precision + recall) if (precision + recall) > 0 else 0.0

        # MRR: reciprocal rank of the first relevant retrieved document.
        mrr = 0.0
        for idx, text in enumerate(retrieved_texts, 1):
            text_key = text.lower().strip()[:100]
            if text_key in relevant_set:
                mrr = 1.0 / idx
                break

        return {
            "precision@k": precision,
            "recall@k": recall,
            "f1_score": f1,
            "mrr": mrr,
            "relevant_count": len(relevant_set),
            "retrieved_count": len(retrieved_set),
            "intersection_count": len(intersection)
        }

    def evaluate_answer_quality(self, generated_answer: str, reference_answer: str) -> Dict[str, float]:
        """Score a generated answer against a reference answer.

        Combines embedding cosine similarity (weight 0.8) with a length
        ratio (weight 0.2) that penalizes answers much longer or shorter
        than the reference.

        Args:
            generated_answer: The model's answer.
            reference_answer: The gold answer.

        Returns:
            Dict with ``semantic_similarity``, ``length_ratio`` and
            ``quality_score`` (all zero plus an ``error`` key on failure).
        """
        try:
            # Embed both answers with the same model used for retrieval.
            gen_embedding = self.embeddings.embed_query(generated_answer)
            ref_embedding = self.embeddings.embed_query(reference_answer)

            # Cosine similarity between the two answer embeddings.
            import numpy as np
            gen_vec = np.array(gen_embedding)
            ref_vec = np.array(ref_embedding)

            cosine_sim = np.dot(gen_vec, ref_vec) / (np.linalg.norm(gen_vec) * np.linalg.norm(ref_vec))

            # Length ratio in (0, 1]; guards against empty strings.
            length_ratio = min(len(generated_answer), len(reference_answer)) / max(len(generated_answer), len(reference_answer)) if max(len(generated_answer), len(reference_answer)) > 0 else 0.0

            # Weighted composite score.
            quality_score = cosine_sim * 0.8 + length_ratio * 0.2

            return {
                "semantic_similarity": float(cosine_sim),
                "length_ratio": float(length_ratio),
                "quality_score": float(quality_score)
            }
        except Exception as e:
            return {
                "semantic_similarity": 0.0,
                "length_ratio": 0.0,
                "quality_score": 0.0,
                "error": str(e)
            }

    def comprehensive_evaluate(self, test_dataset: List[Dict[str, Any]]) -> Dict[str, Any]:
        """Run retrieval and answer-quality evaluation over a test set.

        Args:
            test_dataset: List of test cases, each a dict with keys
                ``question``, ``reference_answer`` (optional) and
                ``relevant_texts`` (optional list of gold texts).

        Returns:
            Report dict with per-case ``detailed_results`` plus averaged
            ``retrieval_metrics`` and ``answer_metrics``. The report is also
            printed to stdout.
        """
        print("\n" + "=" * 60)
        print("开始RAG系统综合评估")
        print("=" * 60)

        # Accumulators for per-case metrics, averaged at the end.
        total_retrieval_metrics = {
            "precision@k": [],
            "recall@k": [],
            "f1_score": [],
            "mrr": []
        }

        total_answer_metrics = {
            "semantic_similarity": [],
            "quality_score": []
        }

        detailed_results = []

        for idx, test_case in enumerate(test_dataset, 1):
            print(f"\n[评估 {idx}/{len(test_dataset)}] 问题: {test_case['question']}")

            # 1. Retrieval metrics against the gold relevant texts.
            relevant_texts = test_case.get("relevant_texts", [])
            retrieval_metrics = self.evaluate_retrieval(
                test_case["question"],
                relevant_texts,
                top_k=5
            )

            # 2. Generate an answer (silent mode during evaluation).
            query_result = self.query(test_case["question"], verbose=False)
            generated_answer = query_result["answer"]

            # 3. Answer quality, only when a reference answer is provided.
            reference_answer = test_case.get("reference_answer", "")
            answer_metrics = {}
            if reference_answer:
                answer_metrics = self.evaluate_answer_quality(
                    generated_answer,
                    reference_answer
                )

            # Accumulate metrics for averaging.
            for key in total_retrieval_metrics:
                total_retrieval_metrics[key].append(retrieval_metrics[key])

            if answer_metrics:
                total_answer_metrics["semantic_similarity"].append(answer_metrics["semantic_similarity"])
                total_answer_metrics["quality_score"].append(answer_metrics["quality_score"])

            # Keep full per-case details for the report.
            detailed_results.append({
                "question": test_case["question"],
                "generated_answer": generated_answer,
                "reference_answer": reference_answer,
                "retrieval_metrics": retrieval_metrics,
                "answer_metrics": answer_metrics
            })

            print(f"  检索准确率: Precision@5={retrieval_metrics['precision@k']:.3f}, Recall@5={retrieval_metrics['recall@k']:.3f}")
            if answer_metrics:
                print(f"  答案质量: 语义相似度={answer_metrics['semantic_similarity']:.3f}, 综合分数={answer_metrics['quality_score']:.3f}")

        # Average each metric over all test cases (0.0 when empty).
        avg_retrieval = {
            f"avg_{key}": sum(values) / len(values) if values else 0.0
            for key, values in total_retrieval_metrics.items()
        }

        avg_answer = {
            f"avg_{key}": sum(values) / len(values) if values else 0.0
            for key, values in total_answer_metrics.items()
        }

        report = {
            "total_test_cases": len(test_dataset),
            "retrieval_metrics": avg_retrieval,
            "answer_metrics": avg_answer,
            "detailed_results": detailed_results
        }

        # Human-readable summary.
        print("\n" + "=" * 60)
        print("评估报告")
        print("=" * 60)
        print(f"测试用例总数: {report['total_test_cases']}")
        print("\n【检索性能】")
        print(f"  平均精确率@5: {avg_retrieval['avg_precision@k']:.3f}")
        print(f"  平均召回率@5: {avg_retrieval['avg_recall@k']:.3f}")
        print(f"  平均F1分数: {avg_retrieval['avg_f1_score']:.3f}")
        print(f"  平均MRR: {avg_retrieval['avg_mrr']:.3f}")

        if avg_answer:
            print("\n【答案质量】")
            print(f"  平均语义相似度: {avg_answer['avg_semantic_similarity']:.3f}")
            print(f"  平均综合分数: {avg_answer['avg_quality_score']:.3f}")

        print("=" * 60)

        return report


# FastAPI routes
from fastapi import APIRouter
from pydantic import BaseModel

router = APIRouter()

# Global RAG system instance, created lazily by get_rag_system()
rag_system = None


def get_rag_system():
    """Return the process-wide RAGSystem instance, creating it on first use (singleton)."""
    global rag_system
    if rag_system is not None:
        return rag_system
    rag_system = RAGSystem()
    return rag_system


class QueryRequest(BaseModel):
    """Request payload for the RAG query endpoint."""
    question: str  # the user's natural-language question


class QueryResponse(BaseModel):
    """Response payload for the RAG query endpoint."""
    question: str  # echoed question
    answer: str  # LLM-generated answer
    contexts: List[Dict[str, Any]]  # top retrieved contexts used for the answer
    total_contexts: int  # total number of contexts after merge/dedup


@router.post("/query", response_model=QueryResponse)
async def query_rag(request: QueryRequest):
    """Run the full RAG pipeline for the submitted question and return the answer."""
    return get_rag_system().query(request.question)


if __name__ == "__main__":
    # End-to-end smoke test: build the system, ingest PDFs, then query
    # and evaluate. Requires running Milvus/Elasticsearch services and a
    # DashScope API key.
    print("=" * 50)
    print("RAG系统测试")
    print("=" * 50)

    # Initialize the RAG system (connects to all backends).
    print("\n[初始化] 正在初始化RAG系统...")
    try:
        rag = RAGSystem()
        print("[初始化] RAG系统初始化成功\n")
    except Exception as e:
        print(f"[初始化] 失败: {e}")
        import traceback
        traceback.print_exc()
        exit(1)

    # 1. Load the PDF documents.
    print("=" * 50)
    print("步骤1: 加载PDF文档")
    print("=" * 50)
    pdf_paths = [
        "菜谱.pdf",
        "redbook.pdf"
    ]
    documents = rag.load_documents(pdf_paths)
    print(f"共加载 {len(documents)} 个文档\n")

    # If nothing was loaded, report which files are missing and abort.
    if len(documents) == 0:
        print("[警告] 未加载到任何文档，请检查文件路径和文件是否存在")
        for pdf_path in pdf_paths:
            full_path = os.path.join(os.path.dirname(__file__), pdf_path) if not os.path.isabs(pdf_path) else pdf_path
            if os.path.exists(full_path):
                print(f"  [检查] 文件存在: {full_path}")
            else:
                print(f"  [检查] 文件不存在: {full_path}")
        exit(1)

    # 2. Split the documents into chunks.
    print("=" * 50)
    print("步骤2: 分割文档")
    print("=" * 50)
    chunks = rag.split_documents(documents)
    print(f"共生成 {len(chunks)} 个文档块\n")

    if len(chunks) == 0:
        print("[警告] 未生成任何文档块，请检查文档内容")
        exit(1)

    # 3. Embed the chunks and store them in Milvus.
    print("=" * 50)
    print("步骤3: 向量化并存储到Milvus")
    print("=" * 50)
    try:
        rag.vectorize_and_store(chunks)
        print("向量化存储完成\n")
    except Exception as e:
        print(f"向量化存储失败: {e}")
        import traceback
        traceback.print_exc()
        exit(1)

    # 4. Mirror the chunks into Elasticsearch.
    print("=" * 50)
    print("步骤4: 同步到Elasticsearch")
    print("=" * 50)
    try:
        rag.sync_to_elasticsearch(chunks)
        print("ES同步完成\n")
    except Exception as e:
        print(f"ES同步失败: {e}")
        import traceback
        traceback.print_exc()
        exit(1)

    # 5. Run some sample queries end to end.
    print("=" * 50)
    print("步骤5: 测试查询")
    print("=" * 50)
    test_questions = [
        "如何制作红烧茄子？",
        "小红书的特点是什么？",
    ]

    for question in test_questions:
        print("\n" + "-" * 50)
        result = rag.query(question)
        print(f"\n问题: {result['question']}")
        print(f"答案: {result['answer']}")
        print(f"上下文数量: {result['total_contexts']}")
        if result['total_contexts'] > 0:
            print("检索到的部分上下文:")
            for i, ctx in enumerate(result['contexts'][:2]):  # show the first 2 contexts
                print(f"  上下文 {i+1}: {ctx['text'][:100]}...")
        print("-" * 50)

    # 6. Evaluate retrieval and answer quality.
    print("\n" + "=" * 50)
    print("步骤6: 评估RAG系统准确率")
    print("=" * 50)

    # Test dataset (adjust to match the actual PDF contents).
    test_dataset = [
        {
            "question": "如何制作红烧茄子？",
            "reference_answer": "制作红烧茄子的步骤包括：准备茄子、调制红烧汁、炒制等步骤。",
            "relevant_texts": [
                "红烧茄子",
                "茄子",
                "制作",
                "菜谱"
            ]
        },
        {
            "question": "小红书的特点是什么？",
            "reference_answer": "小红书是一个年轻人的生活方式平台和消费决策入口，是一个种草平台。",
            "relevant_texts": [
                "小红书",
                "年轻人",
                "生活方式",
                "种草",
                "平台"
            ]
        }
    ]

    # Run the evaluation and persist the report.
    try:
        evaluation_report = rag.comprehensive_evaluate(test_dataset)

        # Save the evaluation report to a JSON file.
        import json
        report_file = "rag_evaluation_report.json"
        with open(report_file, "w", encoding="utf-8") as f:
            json.dump(evaluation_report, f, ensure_ascii=False, indent=2)
        print(f"\n评估报告已保存到: {report_file}")

    except Exception as e:
        print(f"\n评估过程出错: {e}")
        import traceback
        traceback.print_exc()

    print("\n" + "=" * 50)
    print("测试完成")
    print("=" * 50)