import json
import asyncio
from pathlib import Path
from typing import List, Dict, Any

from app.RAG.RAG_lx_jd import (
    vectorize_question,
    vector_search,
    filter_results,
    QuestionPreprocessor,
)

# Directory containing this script; used to resolve sibling data files.
BASE_DIR = Path(__file__).resolve().parent
# JSON file with the evaluation cases (each has "id", "question", "golden_doc").
TEST_CASE_PATH = BASE_DIR / "rag_evaluation_cases.json"


def load_test_cases() -> List[Dict[str, Any]]:
    """Load the evaluation test cases from the JSON file next to this script."""
    return json.loads(TEST_CASE_PATH.read_text(encoding="utf-8"))


def clean_text(text: str) -> str:
    """Normalize *text* by collapsing every run of whitespace to a single space.

    Falsy input (empty string or None) yields an empty string.
    """
    if not text:
        return ""
    return " ".join(text.split())


async def _evaluate_case(
    case: Dict[str, Any],
    preprocessor: "QuestionPreprocessor",
    top_k: int,
) -> tuple:
    """Run retrieval for one test case and return (summary_row, metrics_row).

    The blocking preprocess / vectorize / search calls are pushed onto
    worker threads via asyncio.to_thread so the event loop stays responsive.
    """
    case_id = case["id"]
    question = case["question"]
    golden_doc = case["golden_doc"]
    # Each case has exactly one golden document, so N is fixed at 1.
    n_value = 1

    preprocessed = await asyncio.to_thread(
        preprocessor.comprehensive_preprocess, question
    )
    processed_question = preprocessed["final"]

    question_vec = await asyncio.to_thread(vectorize_question, processed_question)

    vec_res = await asyncio.to_thread(
        vector_search,
        question_vec,
        top_k,
        "./chroma_db3",
        "simple_rag3",
        None,
    )

    filtered = filter_results(vec_res, processed_question)
    m_value = len(filtered)

    # Whitespace-normalized exact match decides whether the golden doc was hit.
    golden_clean = clean_text(golden_doc)
    hit = any(clean_text(item.get("text", "")) == golden_clean for item in filtered)
    k_value = 1 if hit else 0

    # Percent metrics; guard the divisions against empty result sets.
    recall = 100.0 * k_value / n_value if n_value else 0.0
    precision = 100.0 * k_value / m_value if m_value else 0.0
    f1 = (
        2 * precision * recall / (precision + recall)
        if (precision + recall) > 0
        else 0.0
    )

    summary = {
        "id": case_id,
        "question": question,
        "golden_doc": golden_doc,
        "N": n_value,
    }
    metrics = {
        "id": case_id,
        "M": m_value,
        "K": k_value,
        "precision": precision,
        "recall": recall,
        "f1": f1,
    }
    return summary, metrics


def _print_case_list(test_case_summary: List[Dict[str, Any]]) -> None:
    """Print the test-case inventory (id, question, golden doc, N)."""
    print("\n测试用例清单")
    print("-" * 80)
    for entry in test_case_summary:
        print(f"用例 {entry['id']}:")
        print(f"  问题：{entry['question']}")
        print(f"  黄金文档：{entry['golden_doc']}")
        print(f"  N = {entry['N']}")
        print()


def _print_metrics_table(metrics_rows: List[Dict[str, Any]]) -> None:
    """Print the per-case retrieval metrics as an aligned text table."""
    print("\n检索结果与指标计算表")
    print("-" * 80)
    header = f"{'用例ID':<8}{'M':>5}{'K':>5}{'召回率%':>12}{'精确率%':>12}{'F1%':>10}"
    print(header)
    print("-" * len(header))
    for row in metrics_rows:
        print(
            f"{row['id']:<8}"
            f"{row['M']:>5}"
            f"{row['K']:>5}"
            f"{row['recall']:>12.2f}"
            f"{row['precision']:>12.2f}"
            f"{row['f1']:>10.2f}"
        )


def _print_summary(avg_precision: float, avg_recall: float, avg_f1: float) -> None:
    """Print averaged metrics, the pass/fail verdict, and tailored suggestions."""
    # Acceptance thresholds: recall >= 80%, precision >= 70%, F1 >= 75%.
    meets_recall = avg_recall >= 80.0
    meets_precision = avg_precision >= 70.0
    meets_f1 = avg_f1 >= 75.0
    is_pass = meets_recall and meets_precision and meets_f1

    print("\n评估总结")
    print("-" * 80)
    print(f"平均召回率：{avg_recall:.2f}%")
    print(f"平均精确率：{avg_precision:.2f}%")
    print(f"平均F1分数：{avg_f1:.2f}%")
    print(
        f"是否达标（召回率≥80%、精确率≥70%、F1≥75%）：{'是' if is_pass else '否'}"
    )

    # One suggestion per failed threshold.
    suggestions = []
    if not meets_recall:
        suggestions.append("增加黄金文档与检索文本的一致性或扩充知识库覆盖度以提升召回率")
    if not meets_precision:
        suggestions.append("优化向量检索与结果过滤策略，降低无关片段混入从而提升精确率")
    if not meets_f1:
        suggestions.append("综合优化检索召回与过滤排序，改善整体 F1 指标")

    if suggestions:
        print("优化建议：")
        for idx, suggestion in enumerate(suggestions, start=1):
            print(f"  {idx}. {suggestion}")
    else:
        print("优化建议：当前指标已达标，可继续观察长期表现。")


async def evaluate_retrieval(top_k: int = 5):
    """Evaluate RAG retrieval quality over the bundled JSON test cases.

    For each case: preprocess the question, vectorize it, search the Chroma
    collection, filter the hits, and score precision/recall/F1 based on
    whether the golden document appears among the filtered results. Finally
    print the case list, the per-case metrics table, and the averaged
    summary with pass/fail verdict and improvement suggestions.

    Args:
        top_k: number of candidates requested from the vector search.
    """
    test_cases = load_test_cases()
    preprocessor = QuestionPreprocessor()

    test_case_summary: List[Dict[str, Any]] = []
    metrics_rows: List[Dict[str, Any]] = []

    for case in test_cases:
        summary, metrics = await _evaluate_case(case, preprocessor, top_k)
        test_case_summary.append(summary)
        metrics_rows.append(metrics)

    # Averages over all cases; guard against an empty test set.
    case_count = len(test_cases)
    if case_count:
        avg_precision = sum(r["precision"] for r in metrics_rows) / case_count
        avg_recall = sum(r["recall"] for r in metrics_rows) / case_count
        avg_f1 = sum(r["f1"] for r in metrics_rows) / case_count
    else:
        avg_precision = avg_recall = avg_f1 = 0.0

    _print_case_list(test_case_summary)
    _print_metrics_table(metrics_rows)
    _print_summary(avg_precision, avg_recall, avg_f1)


if __name__ == "__main__":
    # Script entry point: run the full evaluation with the default top_k (5).
    asyncio.run(evaluate_retrieval())

