# 1. 导入必要库（只保留核心依赖）
# 说明：这里的依赖兼容"教学/演示"与"生产"两种路径，
# - langchain 用于基础的文本切分（在 RAG_lx.py 中也有更完整实现）
# - chromadb / elasticsearch 分别用于向量检索与关键词检索
# - dashscope（通义千问）用于向量化（embedding），通过环境变量 DASHSCOPE_API_KEY 读取密钥
import sys
import os
import json
import asyncio
import re
import hashlib

from fastapi import APIRouter, Body

sys.path.append(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))))


from chromadb import PersistentClient, Settings
from elasticsearch import Elasticsearch

from openai import OpenAI  # 添加OpenAI客户端导入，用于调用通义千问

from app.RAG.RAG_tool.get_data_tool import split_text2, get_data2, get_data, vectorize, sjyhua, split_text_grouped
from app.RAG.RAG_tool.rag_server import vector_search
from app.RAG.RAG_tool.rag_xlh import save_to_chroma, vectorize_question
from app.RAG.RAG_tool.knowledge_maintenance import KnowledgeBaseMaintainer
from app.RAG.RAG_tool.question_preprocessor import QuestionPreprocessor

# Lazily-populated cache of evaluation cases (filled by _load_evaluation_cases).
EVAL_CASES_CACHE = None
# JSON file with the retrieval-evaluation test cases, stored next to this module.
EVAL_CASES_PATH = os.path.join(os.path.dirname(__file__), "rag_evaluation_cases.json")

# 添加日志函数
def dlog(message):
    """Write *message* to stdout with a ``[DEBUG]`` prefix (simple log helper)."""
    print("[DEBUG] {}".format(message))

# 2. Initialize clients (simplified configuration, default local services).
# ChromaDB client (current primary collection, used for both writes and retrieval)
# - persistence directory: ./chroma_db3
# - collection name: simple_rag3
chroma_client = PersistentClient(path="./chroma_db3")
chroma_collection = chroma_client.get_or_create_collection(name="simple_rag3")

# Elasticsearch client (full-text search, defaults to a local single node)
es_client = Elasticsearch(["http://localhost:9200"])

RAG = APIRouter()

@RAG.post("/maintenance/deduplicate")
async def deduplicate_knowledge_base():
    """
    对知识库进行去重优化
    """
    try:
        maintainer = KnowledgeBaseMaintainer()
        result = await asyncio.to_thread(maintainer.deduplicate_chunks)
        return {
            "code": 200,
            "message": "去重操作完成",
            "data": result
        }
    except Exception as e:
        return {
            "code": 500,
            "message": f"去重操作失败: {str(e)}",
            "data": {}
        }


@RAG.post("/maintenance/add_documents")
async def add_documents_to_knowledge_base(documents: list):
    """
    添加新文档到知识库
    
    :param documents: 要添加的文档列表
    """
    try:
        maintainer = KnowledgeBaseMaintainer()
        result = await asyncio.to_thread(maintainer.add_new_documents, documents)
        return {
            "code": 200,
            "message": "文档添加完成",
            "data": result
        }

    except Exception as e:
        return {
            "code": 500,
            "message": f"文档添加失败: {str(e)}",
            "data": {}
        }


@RAG.post("/maintenance/remove_outdated")
async def remove_outdated_documents(criteria: dict):
    """
    删除过时的文档
    
    :param criteria: 删除标准，如 {"before_date": "2023-01-01"} 或 {"by_source": "old_source"}
    """
    try:
        maintainer = KnowledgeBaseMaintainer()
        result = await asyncio.to_thread(maintainer.remove_outdated_documents, criteria)
        return {
            "code": 200,
            "message": "删除过时文档完成",
            "data": result
        }
    except Exception as e:
        return {
            "code": 500,
            "message": f"删除过时文档失败: {str(e)}",
            "data": {}
        }


def run_rag_pipeline(pdf_path: str = r"C:\Users\lzb\Desktop\lx\abcd\novel-fastapi\rag数据源\完整RAG数据源-小说创作系统.pdf"):
    """
    Offline ingestion pipeline: fetch data (DB + PDF), filter, split,
    vectorize, store into ChromaDB, then run maintenance (dedup plus sample
    add/remove operations).

    :param pdf_path: path to the source PDF document. Defaults to the
        previously hard-coded location for backward compatibility; pass a
        different path to ingest another document.
    """
    # Step 1: fetch data
    # - DB: basic novel information (get_data)
    # - PDF: read and clean the configured source document (get_data2)
    raw_data = get_data()

    raw_data2 = get_data2(pdf_path)

    # Step 2: data filtering (filter the whole document before splitting)
    filtered_data, filter_report = sjyhua(raw_data, raw_data2)

    # Step 3: split text
    # - grouping: arrange DB novels into a parent (novel metadata) /
    #   child (synopsis fragment) structure
    grouped = split_text_grouped(filtered_data)
    print(f"分割后的小说内容{grouped}")

    # - semantic split: segment the filtered PDF text by Chinese headings /
    #   numbering, window-splitting over-long fragments
    # NOTE(review): this passes `filter_report`, not `filtered_data` — the
    # original comment says "use filtered data"; confirm which one is intended.
    chunks1 = split_text2(filter_report)

    print("语义分割后的内容是")
    for chunk in chunks1:
        print(chunk)

    # Step 4: vectorization (Tongyi Qianwen embedding) over all fragments
    vectors = vectorize(chunks1, grouped)
    print(f"步骤4：向量化完成，得到{len(vectors)}个向量")

    # Step 5: persist into ChromaDB
    save_to_chroma(vectors, chunks1, grouped)
    print("步骤5：存入ChromaDB完成")

    # Step 6: knowledge-base maintenance — dedup, then sample add/remove
    maintainer = KnowledgeBaseMaintainer()
    # Identify and delete duplicated document chunks
    dedup_result = maintainer.deduplicate_chunks()
    print(f"步骤6.1：知识库去重完成，去重结果：{dedup_result}")

    # Example: add a new document (call as needed in real deployments)
    new_documents = [
        {
            "content": "这是2024年新增的行业报告内容",
            "source": "2024_annual_report",
            "title": "2024年行业报告",
            "author": "分析师",
            "category": "年度报告"
        }
    ]
    add_result = maintainer.add_new_documents(new_documents)
    print(f"步骤6.2：新增文档完成，添加结果：{add_result}")

    # Example: remove outdated documents (call as needed)
    remove_result = maintainer.remove_outdated_documents({"by_source": "2020_old_policy"})
    print(f"步骤6.3：删除过时文档完成，删除结果：{remove_result}")


#
#


def _tokenize(text: str) -> set[str]:
    """
    将文本拆解为词单元：
    - 中文按连续汉字分组
    - 英文/数字按连续字符
    """
    if not text:
        return set()
    tokens = re.findall(r"[\u4e00-\u9fa5]+|[a-zA-Z0-9]+", text.lower())
    return set(tokens)


def _clean_text(text: str) -> str:
    if not text:
        return ""
    # 标准化空白字符
    text = text.replace("\r", " ").replace("\n", " ")
    # 移除Markdown与列表符号
    text = re.sub(r"[#>*\-•：:]+", " ", text)
    # 合并多余空白
    text = " ".join(text.split())
    return text.strip()


def _normalize_for_match(text: str) -> str:
    """Lower-case and clean *text* so evaluation comparisons are uniform."""
    return _clean_text(text.lower()) if text else ""


def _is_text_match(reference: str, candidate: str) -> bool:
    """
    Decide whether a retrieved chunk matches the golden answer.

    Match criteria:
    - exact equality
    - full containment (either side contains the other)
    - token coverage of the reference vocabulary >= 80%

    :param reference: golden/reference text
    :param candidate: retrieved candidate text
    :return: True when the candidate counts as a hit
    """
    # Empty input on either side can never match.
    # (The original duplicated this guard; one check suffices.)
    if not reference or not candidate:
        return False

    if reference == candidate:
        return True

    # Containment in either direction counts as a full match.
    if reference in candidate or candidate in reference:
        return True

    reference_tokens = _tokenize(reference)
    candidate_tokens = _tokenize(candidate)
    if not reference_tokens or not candidate_tokens:
        return False

    # Fall back to lexical coverage of the reference vocabulary; the guard
    # above guarantees reference_tokens is non-empty, so division is safe.
    overlap = reference_tokens & candidate_tokens
    return len(overlap) / len(reference_tokens) >= 0.8


def _coverage_ratio(reference_tokens: set[str], candidate_tokens: set[str]) -> float:
    if not reference_tokens or not candidate_tokens:
        return 0.0
    overlap = reference_tokens & candidate_tokens
    return len(overlap) / len(reference_tokens) if reference_tokens else 0.0


def _load_evaluation_cases():
    """
    Load evaluation cases from EVAL_CASES_PATH, caching the parsed result in
    the module-level EVAL_CASES_CACHE so the file is read at most once.
    Returns an empty list when the file is missing or unparsable.
    """
    global EVAL_CASES_CACHE
    if EVAL_CASES_CACHE is None:
        cases = []
        if os.path.exists(EVAL_CASES_PATH):
            try:
                with open(EVAL_CASES_PATH, "r", encoding="utf-8") as f:
                    cases = json.load(f)
            except Exception as exc:
                print(f"[WARN] 加载评估用例失败: {exc}")
                cases = []
        EVAL_CASES_CACHE = cases
    return EVAL_CASES_CACHE


def filter_results(vec_res, question: str):
    """
    Filter retrieval results, removing low-relevance and duplicate chunks.

    Stages:
    1. Similarity threshold filter: accept similarity >= 0.6; a tag-whitelisted
       low-confidence lane accepts [0.5, 0.6); a last-resort fallback keeps the
       single most similar candidate.
    2. Content-hash deduplication scored by a blend of semantic similarity and
       lexical overlap with the question, adjusted by tag multipliers.
    3. Truncation to the top 3, with a final fallback to the most similar raw
       result so a non-empty retrieval never yields an empty answer set.

    :param vec_res: vector retrieval results — dicts with 'id', 'document',
                    'distance' and 'metadata' keys
    :param question: user question text, used for lexical-overlap scoring
    :return: filtered list of dicts with 'id', 'text', 'score',
             'semantic_score', 'lexical_score', 'distance', 'metadata'
    """
    question_tokens = _tokenize(question)

    # Tokens shorter than 2 characters carry little signal for tag matching.
    significant_tokens = {token for token in question_tokens if len(token) >= 2}

    def evaluate_tags(metadata):
        # Score multiplier derived from metadata tags: boost tags overlapping
        # the question, penalize known low-value tags, neutral otherwise.
        tags = metadata.get("tags") if metadata else None
        if not tags:
            return 1.0
        tags_lower = [str(tag).lower() for tag in tags]
        negative_tags = {"非设定内容", "目录/标题", "短文本"}
        positive_hits = sum(
            1 for tag in tags_lower for token in significant_tokens if token in tag
        )
        if positive_hits > 0:
            return 1.3 + 0.1 * positive_hits
        if any(tag in negative_tags for tag in tags_lower):
            return 0.4
        return 1.0

    allow_low_confidence = []
    candidate_snapshot = []

    # 1. Relevance threshold filter: accept similarity >= 0.6 directly; results
    #    in [0.5, 0.6) are kept only when their tags are whitelisted
    #    ("推荐"/"榜单") and are flagged as low-confidence.
    #    (Earlier comments claimed 0.7/0.45 — the code uses 0.6/0.5.)
    filtered_results = []
    for item in vec_res:
        distance = item.get('distance', None)
        # Map distance to a similarity in (0, 1]; missing/invalid -> 0.
        similarity = 1.0 / (1.0 + distance) if distance is not None and distance >= 0 else 0
        metadata = item.get('metadata') or {}
        allow_tag = metadata.get("tags") and any("推荐" in str(tag) or "榜单" in str(tag) for tag in metadata["tags"])
        candidate_snapshot.append({
            "id": item.get("id"),
            "similarity": similarity,
            "tags": metadata.get("tags"),
            "title": metadata.get("title"),
            "source": metadata.get("source"),
        })
        if similarity >= 0.6:
            filtered_results.append(item)
        elif similarity >= 0.5 and allow_tag:
            item = dict(item)
            item["low_confidence"] = True
            allow_low_confidence.append(item)

    if not filtered_results and allow_low_confidence:
        filtered_results = allow_low_confidence

    if not filtered_results and candidate_snapshot:
        # Last resort: keep the single most similar candidate so the
        # deduplication stage still has something to rank.
        best = max(candidate_snapshot, key=lambda snap: snap["similarity"])
        print(f"[DEBUG][filter_results] 无高阈值命中，回退保留最高相似度 id={best['id']} sim={best['similarity']:.3f}")
        for item in vec_res:
            if item.get("id") == best["id"]:
                fallback_item = {
                    "id": item.get("id"),
                    # FIX: the content key must be 'document' (was 'text');
                    # the dedup stage below reads item.get('document'), so the
                    # old key made it silently drop this fallback candidate.
                    "document": item.get("document", ""),
                    "distance": item.get("distance"),
                    "metadata": item.get("metadata") or {},
                    "low_confidence": True,
                }
                filtered_results = [fallback_item]
                break

    # 2. Deduplicate by content hash, keeping the highest-scoring copy.
    unique_results = {}
    for item in filtered_results:
        content = item.get('document', '')
        distance = item.get('distance', None)
        semantic_score = 1.0 / (1.0 + distance) if distance is not None and distance >= 0 else 0
        lexical_score = 0.0
        if question_tokens and content:
            content_tokens = _tokenize(content)
            if content_tokens:
                # Jaccard overlap between question and content vocabularies.
                intersection = question_tokens & content_tokens
                union = question_tokens | content_tokens
                lexical_score = len(intersection) / len(union) if union else 0.0
        # Blend semantic and lexical scores; semantic dominates.
        combined_score = semantic_score * 0.75 + lexical_score * 0.25
        tag_multiplier = evaluate_tags(item.get('metadata', {}))
        if item.get("low_confidence"):
            tag_multiplier *= 0.6
        combined_score *= tag_multiplier
        combined_score = round(combined_score, 6)
        # Hash the (stripped) content to detect duplicates; skip empty content.
        content_hash = hashlib.md5(content.strip().encode("utf-8")).hexdigest() if content else None
        if not content_hash:
            continue
        if combined_score < 0.35:
            continue
        if content_hash not in unique_results or combined_score > unique_results[content_hash]['score']:
            unique_results[content_hash] = {
                'id': item['id'],
                'text': content,
                'score': combined_score,
                'semantic_score': round(semantic_score, 6),
                'lexical_score': round(lexical_score, 6),
                'distance': distance,
                'metadata': item.get('metadata', {})
            }

    # Sort by combined score, best first.
    deduplicated_results = list(unique_results.values())
    deduplicated_results.sort(key=lambda x: x['score'], reverse=True)

    # Cap the result count so the LLM prompt is not flooded.
    deduplicated_results = deduplicated_results[:3]

    # If nothing survived but the raw retrieval was non-empty, fall back to the
    # single most similar raw result.
    if not deduplicated_results and vec_res:
        best_item = max(vec_res, key=lambda x: 1.0 / (1.0 + x.get('distance', float('inf'))) if x.get('distance') is not None else 0)
        deduplicated_results = [{
            'id': best_item['id'],
            'text': best_item.get('document', ''),
            'score': 1.0 / (1.0 + best_item.get('distance', 0)) if best_item.get('distance') is not None else 0,
            'semantic_score': 1.0 / (1.0 + best_item.get('distance', 0)) if best_item.get('distance') is not None else 0,
            'lexical_score': 0.0,
            'distance': best_item.get('distance'),
            'metadata': best_item.get('metadata', {})
        }]

    if not deduplicated_results:
        # NOTE(review): with the fallback above, this branch is only reachable
        # when vec_res itself was empty, in which case filtered_results is
        # empty too — kept for defensive logging.
        print("[DEBUG][filter_results] 所有候选被过滤，候选详情：")
        for snap in candidate_snapshot:
            print(
                f"  -> id={snap['id']} sim={snap['similarity']:.3f} "
                f"tags={snap['tags']} title={snap['title']} source={snap['source']}"
            )
        if filtered_results:
            deduplicated_results = filtered_results[:1]
            print(f"[DEBUG][filter_results] 仍保留回退候选 id={deduplicated_results[0].get('id')}")

    # Debug output of the final ranked results.
    if deduplicated_results:
        for i, result in enumerate(deduplicated_results):
            print(f"[DEBUG][filter_results] 最终结果 {i+1}: id={result['id']} score={result['score']:.3f}")

    return deduplicated_results


@RAG.post("/select")
async def run_rag_select(question: str, filter_options: dict = None):
    # 步骤7：用户问题预处理（纠错和改写）
    # 创建问题预处理器实例
    preprocessor = QuestionPreprocessor()
    # 执行问题预处理
    preprocessed_result = preprocessor.comprehensive_preprocess(question)
    
    # 使用预处理后的问题进行后续操作
    processed_question = preprocessed_result["final"]
    print(f"步骤7：用户问题预处理完成")
    print(f"  原始问题: {preprocessed_result['original']}")
    print(f"  纠错后问题: {preprocessed_result['corrected']}")
    print(f"  基础改写: {preprocessed_result['rewritten']}")
    print(f"  高级改写: {preprocessed_result['advanced_rewrite']}")
    print(f"  最终问题: {preprocessed_result['final']}")
    
    # 步骤8：用户问题向量化（通义千问）
    question_vec = vectorize_question(processed_question)
    print("步骤8：问题向量化完成")

    # 步骤9：向量检索（Chroma - chroma_db3/simple_rag3）
    # 确保使用正确的集合
    # 添加元数据过滤选项，确保检索结果更加精准和时效
    metadata_filter = filter_options.get("metadata_filter") if filter_options else None
    vec_res = vector_search(question_vec, top_k=5, persist_directory="./chroma_db3", collection_name="simple_rag3", metadata_filter=metadata_filter)
    print(f"步骤9：向量查询得到{len(vec_res)}个结果")

    # 步骤10：检索结果过滤去除
    # 去除低相关性、重复的 Chunk，避免干扰 LLM 生成
    # - 相关性阈值：过滤向量距离超过阈值的 Chunk（如余弦距离 < 0.7 的剔除）
    # - 去重：合并内容重复的检索结果
    filtered_result = filter_results(vec_res, processed_question)
    print(f"步骤10：结果过滤完成，得到{len(filtered_result)}个结果")

    # 步骤11：Prompt 构建
    # 将 "问题 + 检索知识" 组织成 LLM 能理解的格式，引导其基于知识回答
    # 获取检索到的知识内容
    retrieved_contents = [item["text"] for item in filtered_result]

    # 构建 Prompt 模板
    if retrieved_contents:
        prompt_template = f"""
        基于以下知识回答问题：
        {chr(10).join([f"知识{i+1}: {content}" for i, content in enumerate(retrieved_contents)])}
        
        问题：{processed_question}
        
        要求：只基于上述知识，不编造内容。如果知识中没有相关信息，请说明无法基于提供的知识回答该问题。
        """
    else:
        prompt_template = f"""
        问题：{processed_question}
        
        回答：无法找到与该问题相关的信息，请尝试重新表述问题或提供更多背景信息。
        """

    print("步骤11：Prompt构建完成")
    # print(f"  构建的Prompt: {prompt_template}")

    # 步骤12：LLM 生成回答
    # 核心环节：LLM 结合检索到的知识生成准确回答，避免幻觉
    # 使用通义千问模型生成回答
    client = None
    try:
        # 从环境变量获取API密钥
        api_key = os.getenv("DASHSCOPE_API_KEY")
        if not api_key:
            raise ValueError("未设置环境变量 DASHSCOPE_API_KEY")

        # 初始化OpenAI兼容客户端（连接阿里云百炼服务）
        client = OpenAI(
            api_key=api_key,
            base_url="https://dashscope.aliyuncs.com/compatible-mode/v1"
        )

        # 调用通义千问模型生成回答
        completion = client.chat.completions.create(
            model="qwen3-max",
            messages=[
                {"role": "system", "content": "你是一个智能助手，能够基于提供的知识准确回答用户问题。"},
                {"role": "user", "content": prompt_template}
            ],
            temperature=0.7,
            max_tokens=1000
        )

        # 提取生成的回答
        llm_response = completion.choices[0].message.content if completion.choices else "未能生成回答"

    except Exception as e:
        print(f"步骤12：LLM生成回答过程中发生异常: {e}")
        llm_response = "回答生成失败"

    print("步骤12：LLM生成回答完成")

    # 步骤13：结果优化
    if "无法找到" in llm_response or "未能生成" in llm_response:
        # 如果LLM未能生成有效回答，提供备选方案
        optimized_response = llm_response
    else:
        # 对回答进行简单的格式优化
        # 移除多余的空行和空格
        lines = [line.strip() for line in llm_response.split('\n') if line.strip()]
        optimized_response = '\n'.join(lines)
    
    # 步骤14：输出最终回答
    # 向用户返回清晰、准确的结果，部分场景可附带 "引用来源"（增强可信度）
    # 示例："回答：XXX 参考知识来源：文档《RAG 技术白皮书 2024》P15、网页 https://xxx"
    
    # 构建引用来源信息
    sources = []
    for i, item in enumerate(filtered_result):
        source_info = item.get('metadata', {})
        source_text = f"知识来源{i+1}: "
        if source_info.get('title'):
            source_text += f"标题: {source_info.get('title')}"
        if source_info.get('source'):
            source_text += f", 来源: {source_info.get('source')}"
        sources.append(source_text)
    
    # 构建最终响应结果
    final_response = {
        "answer": optimized_response,
        "sources": sources if sources else ["无明确来源"],
        "question_processing": preprocessed_result
    }

    print("步骤14：输出最终回答完成")
    print(f"  最终回答: {final_response}")

    # 包装返回格式，添加code字段以符合前端拦截器期望
    # print(f"最终返回结果:{final_response}")
    return {
        "code": 200,
        "message": "查询成功",
        "data": final_response
    }


async def _run_retrieval_evaluation(top_k: int = 5, case_limit: int | None = 3, case_ids: list[str] | None = None):
    """
    Run the retrieval evaluation, returning overall metrics plus detailed
    per-case results.

    :param top_k: number of candidates the vector search returns
    :param case_limit: evaluate at most this many cases (default 3);
        None evaluates all cases
    :param case_ids: when given, restrict evaluation to these case IDs
    :return: {"summary": {...}, "cases": [...]}
    """
    test_cases = _load_evaluation_cases()
    if not test_cases:
        # No case file available — return an empty, explanatory summary.
        return {
            "summary": {
                "case_count": 0,
                "average_precision": None,
                "average_recall": None,
                "average_f1": None,
                "requirements_met": False,
                "note": "未找到评估用例，请先维护 `rag_evaluation_cases.json`。"
            },
            "cases": []
        }

    # Restrict to the requested case IDs, if any.
    if case_ids:
        id_set = {str(case_id) for case_id in case_ids}
        test_cases = [case for case in test_cases if str(case.get("id")) in id_set]

    # Apply the case limit (ignored when None or non-positive).
    if case_limit is not None and case_limit > 0:
        test_cases = test_cases[:case_limit]

    if not test_cases:
        return {
            "summary": {
                "case_count": 0,
                "average_precision": None,
                "average_recall": None,
                "average_f1": None,
                "requirements_met": False,
                "note": "没有匹配的评估用例，请检查 case_limit 或 case_ids 参数。"
            },
            "cases": []
        }

    preprocessor = QuestionPreprocessor()
    case_results = []

    total_precision = 0.0
    total_recall = 0.0
    total_f1 = 0.0

    for case in test_cases:
        case_id = case.get("id")
        question = case.get("question", "")
        golden_doc = case.get("golden_doc", "")

        # Preprocess and vectorize in worker threads so the event loop is
        # not blocked by CPU-bound or network-bound work.
        preprocessed = await asyncio.to_thread(
            preprocessor.comprehensive_preprocess,
            question
        )
        processed_question = preprocessed.get("final", question)

        question_vec = await asyncio.to_thread(vectorize_question, processed_question)

        # Positional args: (vector, top_k, persist_directory, collection_name,
        # metadata_filter) — same collection as the /select endpoint.
        vec_res = await asyncio.to_thread(
            vector_search,
            question_vec,
            top_k,
            "./chroma_db3",
            "simple_rag3",
            None,
        )

        filtered = filter_results(vec_res, processed_question)

        normalized_golden = _normalize_for_match(golden_doc)
        retrieved_docs = []
        coverage_sum = 0.0
        golden_tokens = _tokenize(normalized_golden)

        for item in filtered:
            doc_text = item.get("text", "")
            normalized_doc = _normalize_for_match(doc_text)
            candidate_tokens = _tokenize(normalized_doc)

            # Full containment counts as perfect coverage; otherwise use the
            # token-coverage ratio of the golden vocabulary.
            if normalized_golden and normalized_golden in normalized_doc:
                coverage = 1.0
            else:
                coverage = _coverage_ratio(golden_tokens, candidate_tokens)

            coverage_sum += coverage
            # A doc counts as "matched" at >= 80% coverage.
            matched = coverage >= 0.8

            retrieved_docs.append(
                {
                    "id": item.get("id"),
                    "text": doc_text,
                    "score": item.get("score"),
                    "semantic_score": item.get("semantic_score"),
                    "lexical_score": item.get("lexical_score"),
                    "metadata": item.get("metadata", {}),
                    "matched": matched,
                    "coverage": round(coverage, 4)
                }
            )

        # M = retrieved docs; N = golden docs (0 or 1 per case here).
        m_value = len(retrieved_docs)
        n_value = 1 if normalized_golden else 0
        # Cap hits at N so recall never exceeds 1.0.
        effective_hits = min(coverage_sum, float(n_value)) if n_value else 0.0

        # Precision = mean coverage over retrieved docs; recall = capped
        # coverage over the golden doc count.
        precision = coverage_sum / m_value if m_value else 0.0
        recall = effective_hits / n_value if n_value else 0.0
        f1 = (2 * precision * recall / (precision + recall)) if (precision + recall) else 0.0

        total_precision += precision
        total_recall += recall
        total_f1 += f1

        case_results.append(
            {
                "id": case_id,
                "question": question,
                "processed_question": processed_question,
                "golden_doc": golden_doc,
                "retrieved_docs": retrieved_docs,
                "metrics": {
                    "precision": round(precision, 4),
                    "recall": round(recall, 4),
                    "f1": round(f1, 4),
                    "M": m_value,
                    "coverage_sum": round(coverage_sum, 4)
                }
            }
        )

    # Macro-average the per-case metrics.
    case_count = len(test_cases)
    avg_precision = total_precision / case_count if case_count else 0.0
    avg_recall = total_recall / case_count if case_count else 0.0
    avg_f1 = total_f1 / case_count if case_count else 0.0

    # Acceptance thresholds for the overall run.
    meets_recall = avg_recall >= 0.8
    meets_precision = avg_precision >= 0.7
    meets_f1 = avg_f1 >= 0.75

    summary = {
        "case_count": case_count,
        "average_precision": round(avg_precision, 4),
        "average_recall": round(avg_recall, 4),
        "average_f1": round(avg_f1, 4),
        "requirements_met": bool(meets_recall and meets_precision and meets_f1),
        "thresholds": {
            "precision": 0.7,
            "recall": 0.8,
            "f1": 0.75
        }
    }

    # Attach improvement suggestions for any threshold that was missed.
    suggestions = []
    if not meets_recall:
        suggestions.append("建议扩充知识库覆盖度或改进切分策略，以提升召回率。")
    if not meets_precision:
        suggestions.append("优化检索过滤逻辑，降低无关片段混入以提升精确率。")
    if not meets_f1:
        suggestions.append("综合调优召回与精确率，改善整体 F1 指标表现。")

    if suggestions:
        summary["suggestions"] = suggestions

    return {
        "summary": summary,
        "cases": case_results
    }


def run_retrieval_evaluation(top_k: int = 5, case_limit: int | None = 3, case_ids: list[str] | None = None):
    """
    Synchronous entry point for the retrieval evaluation, intended for
    scripts and modules that are not already inside an event loop.

    Raises RuntimeError when called from a running event loop — use
    ``await _run_retrieval_evaluation(...)`` there instead.
    """
    running_loop = None
    try:
        running_loop = asyncio.get_running_loop()
    except RuntimeError:
        pass

    if running_loop is not None and running_loop.is_running():
        raise RuntimeError("当前事件循环已在运行，无法直接调用同步评估方法，请使用 'await _run_retrieval_evaluation(...)'")

    return asyncio.run(_run_retrieval_evaluation(top_k=top_k, case_limit=case_limit, case_ids=case_ids))


@RAG.post("/evaluation/retrieval")
async def evaluate_retrieval_endpoint(payload: dict = Body(default_factory=dict)):
    """
    触发一次检索评估，返回每个测试用例的指标与总体表现
    """
    payload = payload or {}
    top_k = payload.get("top_k", 5)
    case_limit = payload.get("case_limit", 3)
    case_ids = payload.get("case_ids")

    try:
        evaluation_result = await _run_retrieval_evaluation(
            top_k=top_k,
            case_limit=case_limit,
            case_ids=case_ids
        )
        return {
            "code": 200,
            "message": "评估完成",
            "data": evaluation_result
        }
    except Exception as exc:
        return {
            "code": 500,
            "message": f"评估执行失败: {exc}",
            "data": {}
        }



# run_rag_pipeline()


if __name__ == "__main__":
    cli_case_limit = 3
    cli_top_k = 5

    for arg in sys.argv[1:]:
        if arg.isdigit():
            cli_case_limit = int(arg)
        elif arg.startswith("--cases="):
            ids = [item.strip() for item in arg.split("=", 1)[1].split(",") if item.strip()]
            evaluation = run_retrieval_evaluation(top_k=cli_top_k, case_limit=None, case_ids=ids)
            print(json.dumps(evaluation, ensure_ascii=False, indent=2))
            sys.exit(0)
        elif arg.startswith("--topk="):
            try:
                cli_top_k = int(arg.split("=", 1)[1])
            except ValueError:
                print(f"[WARN] 无法解析 top_k 参数：{arg}")

    evaluation = run_retrieval_evaluation(top_k=cli_top_k, case_limit=cli_case_limit)

    print("=========== 单用例指标 ===========")
    for case in evaluation.get("cases", []):
        metrics = case.get("metrics", {})
        print(
            f"- 用例 {case.get('id')}: "
            f"P={metrics.get('precision')}, "
            f"R={metrics.get('recall')}, "
            f"F1={metrics.get('f1')}, "
            f"覆盖总量={metrics.get('coverage_sum')}/{metrics.get('M')}"
        )
    print("=================================")

    print(json.dumps(evaluation, ensure_ascii=False, indent=2))