from typing import List, Dict, Any, Optional
from langchain_ollama import OllamaEmbeddings
from langchain_ollama import OllamaLLM
from app.system_config import settings
from app.exception.rag_exception import RAGException
from app.utils.milvus_store import MilvusStoreWrapper
import logging
import asyncio
import os
import os.path  # 用于提取文件扩展名
from datetime import datetime
from pymilvus import DataType  # 用于Schema检查
from pymilvus import connections

logger = logging.getLogger("Multi-Collection-Query-Rerank")

# -------------------------- Core config: collection -> extension mapping (exact match) --------------------------
# Key: collection name. Value: (list of target file extensions, file-type weight
# used by the rule-based rerank — higher weight boosts that file type's score).
COLLECTION_EXT_MAPPING = {
    "doc_documents_milvus": (["doc"], 1.05),    # matches file_ext == "doc"
    "docx_documents_milvus": (["docx"], 1.1),   # matches file_ext == "docx"
    "excel_documents_milvus": (["xlsx"], 1.15), # matches file_ext == "xlsx"
    "json_documents_milvus": (["json"], 1.0),   # matches file_ext == "json"
    "pdf_documents_milvus": (["pdf"], 1.2),     # matches file_ext == "pdf" (highest weight)
    "txt_documents_milvus": (["txt"], 0.95)     # matches file_ext == "txt"
}


class MultiCollectionQueryService:
    def __init__(
            self,
            milvus_host: str = settings.MILVUS_HOST,
            milvus_port: str = settings.MILVUS_PORT,
            ollama_base_url: str = settings.OLLAMA_URL,
            embedding_model: str = settings.OLLAMA_EMBEDDING_MODEL,
            llm_model: str = settings.OLLAMA_LLM_MODEL,
            collections: List[str] = None,
            use_llm_rerank: bool = False
    ):
        self.milvus_host = milvus_host
        self.milvus_port = milvus_port
        self.ollama_base_url = ollama_base_url
        self.embedding_model = embedding_model
        self.llm_model = llm_model
        self.use_llm_rerank = use_llm_rerank
        self.collections = collections or list(COLLECTION_EXT_MAPPING.keys())

        # 核心组件初始化
        self.embeddings = self._init_embeddings()
        self.llm = self._init_llm()
        self.vector_stores = self._init_vector_stores()  # 初始化时检查file_ext字段

    def _init_embeddings(self) -> OllamaEmbeddings:
        """初始化嵌入模型，验证向量维度"""
        try:
            os.environ["OLLAMA_HOST"] = self.ollama_base_url
            embeddings = OllamaEmbeddings(
                model=self.embedding_model,
                base_url=self.ollama_base_url
            )

            test_emb = embeddings.embed_query("劳动合同法 试用期")
            if not (isinstance(test_emb, list) and len(test_emb) > 0 and isinstance(test_emb[0], float)):
                raise RAGException(503, "嵌入模型返回无效向量（非浮点列表）")

            self.embedding_dim = len(test_emb)
            logger.info(f"嵌入模型就绪：{self.embedding_model}（维度：{self.embedding_dim}）")
            return embeddings

        except Exception as e:
            raise RAGException(500, f"嵌入模型初始化失败：{str(e)}")

    def _init_llm(self) -> OllamaLLM:
        """初始化LLM"""
        try:
            llm = OllamaLLM(
                model=self.llm_model,
                base_url=self.ollama_base_url,
                temperature=0.1,
                request_timeout=180.0
            )

            test_resp = llm.invoke("仅返回'LLM就绪'")
            if "LLM就绪" not in test_resp.strip():
                raise RAGException(503, f"LLM测试失败，返回：{test_resp[:30]}")

            logger.info(f"LLM就绪：{self.llm_model}（Rerank模式：{'LLM语义排序' if self.use_llm_rerank else '规则权重排序'}）")
            return llm
        except Exception as e:
            error_msg = str(e)
            if "connection refused" in error_msg.lower():
                error_msg = "Ollama服务未启动（需执行'ollama serve'）"
            elif "model not found" in error_msg.lower():
                error_msg = f"LLM模型不存在（需执行'ollama pull {self.llm_model}'）"
            raise RAGException(503, error_msg)

    def _check_file_ext_field(self, collection_name: str, wrapper) -> bool:
        """检查集合是否存在file_ext字段（用于存储文件扩展名）"""
        coll_schema = wrapper.collection.schema
        # 遍历所有字段，检查是否有file_ext（忽略大小写）
        for field in coll_schema.fields:
            if field.name.lower() == "file_ext":
                # 确认字段类型是String
                if field.dtype == DataType.VARCHAR or field.dtype == DataType.STRING:
                    return True
                else:
                    raise RAGException(
                        500,
                        f"集合{collection_name}的file_ext字段类型错误（需String/VARCHAR，实际为{field.dtype}）"
                    )
        # 字段不存在，抛出异常并提示解决方案
        raise RAGException(
            500,
            f"集合{collection_name}缺少file_ext字段！请先执行以下操作：\n"
            "1. 在插入数据时，从file_name提取扩展名（如'json'）存储到file_ext字段；\n"
            "2. 新增file_ext字段（类型：String，长度：10），更新集合Schema。"
        )

    def _init_vector_stores(self) -> Dict[str, Any]:
        """初始化向量存储，检查file_ext字段，校验维度和索引"""
        vector_stores = {}
        for collection in self.collections:
            if collection not in COLLECTION_EXT_MAPPING:
                logger.warning(f"跳过未配置集合：{collection}")
                continue

            try:
                # 1. 初始化Milvus包装器
                wrapper = MilvusStoreWrapper(
                    is_removed=False,
                    collection_name=collection,
                    max_retries=3,
                    retry_delay=2
                )

                # 2. 关键检查：确保集合存在file_ext字段
                self._check_file_ext_field(collection, wrapper)

                # 3. 校验embedding字段维度
                coll_schema = wrapper.collection.schema
                embedding_field = next(
                    (f for f in coll_schema.fields if f.name == wrapper.vector_field_name),
                    None
                )
                if not embedding_field:
                    raise RAGException(500, f"集合{collection}缺少embedding字段")
                if embedding_field.params.get("dim") != self.embedding_dim:
                    raise RAGException(
                        500,
                        f"维度不匹配！集合{collection}维度：{embedding_field.params.get('dim')}，"
                        f"模型维度：{self.embedding_dim}"
                    )

                # 4. 检查并创建索引
                target_field = wrapper.vector_field_name
                indexes = wrapper.collection.indexes
                index_exists = any(idx.field_name == target_field for idx in indexes)

                if not index_exists:
                    logger.warning(f"集合{collection}的{target_field}字段无索引，创建IVF_FLAT索引...")
                    index_params = {
                        "index_type": "IVF_FLAT",
                        "metric_type": "L2",
                        "params": {"nlist": 128}
                    }
                    wrapper.collection.create_index(
                        field_name=target_field,
                        index_params=index_params,
                        index_name=f"{target_field}_ivf_flat"
                    )
                    wrapper.collection.load()
                    logger.info(f"集合{collection}索引创建完成")

                # 5. 强制加载集合
                wrapper.collection.load()
                entity_count = wrapper.collection.num_entities
                logger.info(
                    f"集合{collection}初始化成功："
                    f"实体数={entity_count}，"
                    f"目标扩展名={COLLECTION_EXT_MAPPING[collection][0]}"
                )

                vector_stores[collection] = wrapper

            except Exception as e:
                logger.error(f"集合{collection}初始化失败：{str(e)}", exc_info=True)
                continue

        if not vector_stores:
            raise RAGException(500, "所有目标集合均初始化失败，无法执行查询")

        return vector_stores

    def _get_file_filter_expr(self, collection_name: str) -> str:
        """生成基于file_ext字段的精确匹配表达式（避开LIKE限制）"""
        if collection_name not in COLLECTION_EXT_MAPPING:
            return ""

        # 获取该集合需要匹配的扩展名（如["doc"]）
        ext_list, _ = COLLECTION_EXT_MAPPING[collection_name]
        # 生成精确匹配表达式（如"file_ext in ['doc']"）
        ext_str = ", ".join([f"'{ext}'" for ext in ext_list])
        return f"file_ext in [{ext_str}]"

    async def _query_single_collection(
            self,
            wrapper,
            collection_name: str,
            query_vector: List[float],
            k: int = 3,
            l2_threshold: float = 3.0,
            user_filter: Optional[Dict[str, Any]] = None
    ) -> Dict[str, Any]:
        """查询单个集合，基于file_ext精确匹配过滤（替代LIKE）"""
        try:
            # 1. 合并过滤条件：file_ext精确匹配 + 用户自定义过滤
            expr_parts = []

            # 自动添加file_ext过滤（核心：精确匹配扩展名）
            file_filter = self._get_file_filter_expr(collection_name)
            if file_filter:
                expr_parts.append(file_filter)
                logger.debug(f"集合{collection_name}自动文件过滤：{file_filter}")

            # 处理用户自定义过滤（如按文件名包含“劳动合同法”）
            if user_filter and isinstance(user_filter, dict):
                for key, value in user_filter.items():
                    # 检查字段是否存在
                    field_names = [f.name.lower() for f in wrapper.collection.schema.fields]
                    if key.lower() not in field_names:
                        logger.warning(f"集合{collection_name}无字段{key}，跳过过滤")
                        continue
                    # 格式化过滤值（支持LIKE前缀匹配，如用户需要“劳动合同法%”）
                    if isinstance(value, str):
                        # 若用户输入包含%，则用LIKE（仅支持前缀）；否则精确匹配
                        if "%" in value:
                            # 低版本Milvus仅支持前缀%，提示用户调整
                            if value.startswith("%"):
                                logger.warning(f"Milvus不支持前缀%，请改用后缀%（如'{value[1:]}%'）")
                                continue
                            expr_parts.append(f"{key} LIKE '{value}'")
                        else:
                            expr_parts.append(f"{key} == '{value}'")
                    elif isinstance(value, (int, float, bool)):
                        expr_parts.append(f"{key} == {value}")

            # 最终过滤表达式（多条件用AND连接）
            expr = " AND ".join(expr_parts) if expr_parts else None
            if expr:
                logger.info(f"集合{collection_name}最终过滤表达式：{expr}")

            # 2. 执行相似性查询（带L2分数）
            doc_score_pairs = wrapper.vector_store.similarity_search_with_score_by_vector(
                embedding=query_vector,
                k=k,
                expr=expr,
                param={"nprobe": 15}
            )

            # 3. 过滤低相似度结果，补充Rerank所需元数据
            formatted_results = []
            for doc, l2_score in doc_score_pairs:
                if l2_score > l2_threshold:
                    logger.debug(f"集合{collection_name}：L2={l2_score:.4f} > 阈值{l2_threshold}，跳过")
                    continue

                # 补充元数据（含文件类型权重）
                doc.metadata.update({
                    "source_collection": collection_name,
                    "l2_score": round(l2_score, 4),
                    "file_type_weight": COLLECTION_EXT_MAPPING[collection_name][1],
                    "content_length": len(doc.page_content.strip()),
                    "query_time": datetime.now().isoformat()
                })

                formatted_results.append({
                    "doc": doc,
                    "l2_score": round(l2_score, 4),
                    "metadata": doc.metadata,
                    "content_preview": doc.page_content.strip()[:150] + "..."
                })

            logger.info(f"集合{collection_name}查询完成：原始{len(doc_score_pairs)}个 → 过滤后{len(formatted_results)}个")
            return {
                "collection": collection_name,
                "result_count": len(formatted_results),
                "results": formatted_results
            }

        except Exception as e:
            logger.error(f"集合{collection_name}查询失败：{str(e)}", exc_info=True)
            raise

    async def query_multiple_collections(
            self,
            query: str,
            k_per_coll: int = 3,
            l2_threshold: float = 3.0,
            user_filter: Optional[Dict[str, Any]] = None
    ) -> Dict[str, Any]:
        """多集合并行查询"""
        try:
            query_vector = self.embeddings.embed_query(query.strip())
            if len(query_vector) != self.embedding_dim:
                raise RAGException(500, f"查询向量维度异常：{len(query_vector)}（预期：{self.embedding_dim}）")
            logger.info(f"查询向量生成成功（查询文本：{query[:50]}...）")
        except Exception as e:
            raise RAGException(500, f"生成查询向量失败：{str(e)}")

        # 并行查询所有集合
        tasks = []
        for collection_name, wrapper in self.vector_stores.items():
            tasks.append(
                self._query_single_collection(
                    wrapper=wrapper,
                    collection_name=collection_name,
                    query_vector=query_vector,
                    k=k_per_coll,
                    l2_threshold=l2_threshold,
                    user_filter=user_filter
                )
            )

        results = await asyncio.gather(*tasks, return_exceptions=True)

        # 整理原始结果
        raw_output = {
            "query": query,
            "total_raw_results": 0,
            "collection_stats": {},
            "raw_results": []
        }

        for idx, (collection_name, _) in enumerate(self.vector_stores.items()):
            if idx >= len(results) or isinstance(results[idx], Exception):
                err = results[idx] if (idx < len(results) and isinstance(results[idx], Exception)) else "任务超时"
                raw_output["collection_stats"][collection_name] = {
                    "status": "error",
                    "error_msg": str(err),
                    "result_count": 0
                }
                logger.error(f"集合{collection_name}查询异常：{str(err)}")
                continue

            coll_res = results[idx]
            raw_output["collection_stats"][collection_name] = {
                "status": "success",
                "result_count": coll_res["result_count"],
                "l2_threshold_used": coll_res.get("l2_threshold_used", 3.0)
            }
            raw_output["raw_results"].extend(coll_res["results"])
            raw_output["total_raw_results"] += coll_res["result_count"]

        logger.info(f"多集合查询完成：共{raw_output['total_raw_results']}个原始结果")
        return raw_output

    def _rerank_by_rules(self, query: str, raw_results: List[Dict[str, Any]], top_n: int = 10) -> List[Dict[str, Any]]:
        """规则化Rerank：L2相似度+文件权重+内容质量"""
        logger.info(f"规则化Rerank：{len(raw_results)}个原始结果 → Top-{top_n}")

        rerank_list = []
        for res in raw_results:
            # L2分数正向转换（L2越小，得分越高）
            base_score = 1.0 / (1.0 + res["l2_score"])
            # 文件类型权重（从配置获取）
            file_weight = res["metadata"]["file_type_weight"]
            # 内容质量权重（过滤过短/过长内容）
            content_len = res["metadata"]["content_length"]
            content_weight = 0.8 if content_len < 100 else 1.05 if 100 <= content_len <= 1000 else 0.95
            # 总得分
            total_score = round(base_score * file_weight * content_weight, 4)

            rerank_list.append({
                "total_score": total_score,
                "l2_score": res["l2_score"],
                "file_type_weight": file_weight,
                "content_weight": content_weight,
                "doc": res["doc"],
                "metadata": res["metadata"],
                "content_preview": res["content_preview"]
            })

        # 按总得分降序排序
        rerank_list.sort(key=lambda x: x["total_score"], reverse=True)
        return rerank_list[:top_n]

    def _rerank_by_llm(self, query: str, raw_results: List[Dict[str, Any]], top_n: int = 10) -> List[Dict[str, Any]]:
        """LLM Rerank：语义相关性排序"""
        logger.info(f"LLM Rerank：{len(raw_results)}个原始结果 → Top-{top_n}")

        # 构造Rerank提示词
        result_strs = []
        for i, res in enumerate(raw_results, 1):
            meta = res["metadata"]
            result_strs.append(
                f"{i}. 来源：{meta['source_collection']} > {meta.get('file_name', '未知文件')}，"
                f"L2相似度：{res['l2_score']}，"
                f"内容：{res['content_preview']}"
            )

        prompt = f"""
        按用户查询“{query}”的语义相关性排序结果（1=最相关），规则：
        1. 优先选择能直接回答查询的法律条文；
        2. 官方文件（如“中华人民共和国XXX法”）优先级高；
        3. 仅返回排序后的序号（英文逗号分隔，如“3,1,2”）。

        待排序结果：
        {chr(10).join(result_strs)}

        排序后的序号：
        """

        # 调用LLM
        try:
            llm_resp = self.llm.invoke(prompt).strip()
            rank_order = [int(x.strip()) for x in llm_resp.split(",") if x.strip().isdigit()]
            if len(rank_order) != len(raw_results) or max(rank_order) > len(raw_results):
                raise ValueError(f"LLM返回无效序号：{llm_resp}")
            logger.info(f"LLM排序序号：{rank_order}")
        except Exception as e:
            logger.error(f"LLM Rerank失败，降级为规则Rerank：{str(e)}")
            return self._rerank_by_rules(query, raw_results, top_n)

        # 重新排序
        rerank_list = []
        for rank_idx, res_idx in enumerate(rank_order, 1):
            res = raw_results[res_idx - 1]
            rerank_list.append({
                "total_score": round(100 - (rank_idx - 1) * 5, 2),
                "llm_rank": rank_idx,
                "l2_score": res["l2_score"],
                "doc": res["doc"],
                "metadata": res["metadata"],
                "content_preview": res["content_preview"]
            })

        return rerank_list[:top_n]

    def rerank_results(self, query: str, raw_results: List[Dict[str, Any]], top_n: int = 10) -> List[Dict[str, Any]]:
        """统一Rerank入口"""
        if not raw_results:
            logger.warning("无原始结果可Rerank")
            return []
        return self._rerank_by_llm(query, raw_results, top_n) if self.use_llm_rerank else self._rerank_by_rules(query, raw_results, top_n)

    def generate_response(self, query: str, reranked_results: List[Dict[str, Any]], max_context_len: int = 4000) -> str:
        """生成LLM响应"""
        if not reranked_results:
            return "未找到与查询相关的法律条文或解释，无法回答该问题。"

        # 拼接上下文
        context_parts = []
        total_len = 0
        for res in reranked_results:
            meta = res["metadata"]
            doc = res["doc"]
            context_part = (
                f"【相关度得分：{res['total_score']:.4f}】"
                f"来源：{meta['source_collection']} > {meta.get('file_name', '未知文件')}，"
                f"内容：{doc.page_content.strip()}\n\n"
            )
            part_len = len(context_part)

            if total_len + part_len > max_context_len:
                logger.warning(f"上下文达上限{max_context_len}，停止拼接")
                break

            context_parts.append(context_part)
            total_len += part_len

        # 构造提示词
        context = "".join(context_parts)
        prompt = f"""
        基于以下法律文件回答查询，要求：
        1. 仅用上下文信息，不编造条文；
        2. 标注每个观点的来源（“来自[集合名]的[文件名]”）；
        3. 冲突时以得分最高的结果为准。

        上下文：
        {context}

        查询：{query}

        回答：
        """

        # 生成响应
        try:
            logger.info(f"生成响应：上下文{total_len}字符，使用{len(context_parts)}个结果")
            response = self.llm.invoke(prompt).strip()
            return response
        except Exception as e:
            err_msg = f"LLM响应失败：{str(e)}"
            logger.error(err_msg, exc_info=True)
            return err_msg

    async def query_rerank_response(
            self,
            query: str,
            k_per_coll: int = 3,
            l2_threshold: float = 3.0,
            user_filter: Optional[Dict[str, Any]] = None,
            rerank_top_n: int = 8
    ) -> Dict[str, Any]:
        """完整流程：查询→Rerank→响应"""
        if not query.strip():
            raise RAGException(400, "查询文本不能为空")

        # 1. 多集合查询
        raw_results = await self.query_multiple_collections(
            query=query,
            k_per_coll=k_per_coll,
            l2_threshold=l2_threshold,
            user_filter=user_filter
        )

        # 2. Rerank
        reranked_results = self.rerank_results(
            query=query,
            raw_results=raw_results["raw_results"],
            top_n=rerank_top_n
        )

        # 3. 生成响应
        response = self.generate_response(query, reranked_results)

        # 4. 格式化结果
        formatted_reranked = []
        for rank_idx, res in enumerate(reranked_results, 1):
            meta = res["metadata"]
            formatted_reranked.append({
                "rank": rank_idx,
                "total_score": res["total_score"],
                "l2_score": res["l2_score"],
                "source_collection": meta["source_collection"],
                "file_name": meta.get("file_name", "未知文件"),
                "file_ext": meta.get("file_ext", "未知"),
                "content_length": meta.get("content_length", 0),
                "content_preview": res["content_preview"],
                "query_time": meta["query_time"]
            })

        return {
            "query": query,
            "query_time": datetime.now().isoformat(),
            "rerank_method": "LLM语义排序" if self.use_llm_rerank else "规则权重排序",
            "total_raw_results": raw_results["total_raw_results"],
            "total_reranked_results": len(formatted_reranked),
            "collection_stats": raw_results["collection_stats"],
            "reranked_results": formatted_reranked,
            "response": response,
            "status": "success" if formatted_reranked else "no_result"
        }

    async def query_rag(self,user_question: str,user_filter: Optional[Dict[str, Any]] = None):
        try:
            logger.info("=" * 60)
            logger.info("初始化多集合查询-Rerank服务")
            logger.info(f"服务初始化成功（可用集合数：{len(self.vector_stores)}）")

            logger.info(f"\n执行查询：{user_question}")
            final_result = await self.query_rerank_response(
                query=user_question,
                k_per_coll=3,
                l2_threshold=3.5,
                user_filter=user_filter,
                rerank_top_n=8
            )

            # 打印结果
            logger.info("\n" + "=" * 60)
            logger.info(f"【查询结果汇总】")
            logger.info(f"查询文本：{final_result['query']}")
            logger.info(f"处理状态：{final_result['status']}")
            logger.info(f"Rerank方式：{final_result['rerank_method']}")
            logger.info(f"原始结果数：{final_result['total_raw_results']} → Rerank后：{final_result['total_reranked_results']}")
            logger.info(f"\n【各集合状态】")
            for coll, stats in final_result["collection_stats"].items():
                status = "✅ 成功" if stats["status"] == "success" else "❌ 失败"
                logger.info(f"- {coll}：{status}，结果数：{stats['result_count']}")
                if stats["status"] == "error":
                    logger.info(f"  错误：{stats['error_msg'][:60]}...")

            # 打印Rerank结果
            if final_result["reranked_results"]:
                logger.info(f"\n【Rerank后Top-{len(final_result['reranked_results'])}】")
                for res in final_result["reranked_results"]:
                    logger.info(f"\n📊 Rank {res['rank']}（得分：{res['total_score']:.4f}）")
                    logger.info(f"   来源：{res['source_collection']} > {res['file_name']}")
                    logger.info(
                        f"   扩展名：.{res['file_ext']} | L2相似度：{res['l2_score']} | 长度：{res['content_length']}字符")
                    logger.info(f"   预览：{res['content_preview']}")

            # 打印LLM响应
            logger.info(f"\n【📝 LLM响应】")
            logger.info("-" * 50)
            logger.info(final_result)
            logger.info("-" * 50)
            return final_result
        except RAGException as e:
            logger.error(f"\n测试失败：[{e.code}] {e.message}", exc_info=True)
            logger.info(f"\n错误：[{e.code}] {e.message}")
        except Exception as e:
            logger.error(f"\n未知错误：{str(e)}", exc_info=True)
            logger.info(f"\n未知错误：{str(e)}")

    # 2. 添加close方法，用于释放资源
    def close(self):
        """释放Milvus集合资源和关闭连接"""
        """释放资源"""
        try:
            for collection_name, wrapper in self.vector_stores.items():
                try:
                    # 完全移除is_loaded检查，直接释放
                    wrapper.collection.release()
                    logger.info(f"释放集合{collection_name}内存")
                except Exception as e:
                    logger.error(f"释放集合{collection_name}失败：{str(e)}", exc_info=True)
            # 关闭Milvus连接（如果存在）
            if connections.has_connection("default"):
                connections.disconnect("default")
                logger.info("已关闭Milvus连接")
        except Exception as e:
            logger.error(f"释放资源时发生错误：{str(e)}", exc_info=True)

# ------------------------------ 测试入口 ------------------------------
async def main():
    """Smoke-test entry point: build the service, run one query, print the answer.

    Fixes vs. the original:
    - `custom_filter` was a set literal, not a dict, so
      `_query_single_collection` silently ignored it (`isinstance(..., dict)`).
    - `query_rag` returns None when it swallows an exception, and its success
      payload's "status" is only "success"/"no_result" (never "error", and
      there is no result["error"] key) — the old handling would raise
      TypeError/KeyError.
    - The service's close() was never called; release resources in finally.
    """
    # Configure logging first so service-initialization messages are captured.
    logging.basicConfig(
        level=logging.INFO,
        format="%(asctime)s - %(name)s - %(levelname)s - %(message)s"
    )
    query_service = None
    try:
        # Instantiation probes Ollama/Milvus and may raise RAGException.
        query_service = MultiCollectionQueryService(
            use_llm_rerank=False,
            collections=[
                "doc_documents_milvus",
                "docx_documents_milvus",
                "excel_documents_milvus",
                "json_documents_milvus",
                "pdf_documents_milvus",
                "txt_documents_milvus"
            ]  # explicit list of target collections
        )
        logger.info(f"服务初始化成功（可用集合数：{len(query_service.vector_stores)}）")
        # user_filter must be a {field: value} dict; a trailing % requests a
        # Milvus LIKE prefix match on file_name.
        custom_filter = {"file_name": "劳动合同法%"}
        result = await query_service.query_rag(
            user_question="劳动合同法中关于试用期的规定有哪些",
            user_filter=custom_filter
        )
        # query_rag returns None when it caught an exception internally.
        if not result:
            print("查询失败：服务内部发生异常，详见日志")
        elif result["status"] == "no_result":
            print("未找到与查询相关的结果")
        else:
            print(f"LLM响应：{result['response']}")
    except RAGException as e:
        print(f"服务初始化失败：[{e.code}] {e.message}")
        logger.error(f"服务初始化失败：[{e.code}] {e.message}", exc_info=True)
    except Exception as e:
        print(f"未知错误：{str(e)}")
        logger.error(f"未知错误：{str(e)}", exc_info=True)
    finally:
        # Release Milvus collections/connection even when the query failed.
        if query_service is not None:
            query_service.close()



if __name__ == "__main__":
    try:
        # Prefer asyncio.run() (Python 3.7+): creates and closes its own loop.
        asyncio.run(main())
    except RuntimeError as e:
        # Fallback only for the "called from a running event loop" failure
        # (compatibility with embedded/legacy environments).
        # NOTE(review): if a loop is genuinely running, run_until_complete()
        # on that same loop will also raise — confirm this path is reachable
        # in the intended deployment before relying on it.
        if "cannot be called from a running event loop" in str(e):
            loop = asyncio.get_event_loop()
            loop.run_until_complete(main())
        else:
            raise