#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
劳动合同法RAG系统评估方案实现
基于真实数据的完整评估流程

作者: AI助手
日期: 2024-01-15
适用: 小白学生学习，函数式编程，简单易懂
"""

import os
import sys
import json
import time
import math
import re
from typing import List, Dict, Any, Tuple
from collections import Counter
import django

# 添加Django项目路径
sys.path.append('/Users/baimu/PycharmProjects/2504A/bossxm/boss')
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'boss.settings')
django.setup()

# 导入Django模型
from home.models import Jobposting
from user.models import City, Company

# 导入现有的RAG组件
from rag_test.legal_embeddings import text_embeddings
from rag_test.milvus_writer import write_to_milvus
from rag_test.preprocess_labor_law import preprocess_labor_law
from rag_test.structure_metadata import structure_labor_law_metadata
from utils.PDFLawReader import PDFLawReader

# ===============================================
# 第一部分：创建真实的劳动合同法评估数据集
# ===============================================

def create_real_labor_law_evaluation_dataset():
    """
    Build the evaluation dataset from real Labor Contract Law articles.

    Each item pairs a legal question with the articles a good retrieval
    should find, a reference answer, the key phrases a good answer must
    contain, and per-question scoring weights later consumed by
    calculate_overall_legal_score().

    Returns:
        list: evaluation items (dicts), one per question.
    """

    print("📚 创建劳动合同法评估数据集...")

    # Hand-curated evaluation data grounded in the actual statute text.
    evaluation_data = [
        # Statute lookup - based on Article 19 (probation-period limits)
        {
            "question_id": "q001",
            "question": "劳动合同试用期最长可以设定多久？",
            "question_type": "法条查询",
            "difficulty": "简单",
            "expected_articles": ["第十九条"],
            "standard_answer": "根据《劳动合同法》第十九条规定：劳动合同期限三个月以上不满一年的，试用期不得超过一个月；劳动合同期限一年以上不满三年的，试用期不得超过二个月；三年以上固定期限和无固定期限的劳动合同，试用期不得超过六个月。因此，试用期最长不得超过六个月。",
            "key_requirements": ["一个月", "二个月", "六个月", "合同期限", "第十九条"],
            "legal_accuracy_weight": 0.5,
            "practicality_weight": 0.3,
            "retrieval_weight": 0.2
        },

        # Scenario consultation - based on Articles 42 and 87 (pregnancy protection)
        {
            "question_id": "q002",
            "question": "女员工怀孕期间被公司辞退，可以要求什么赔偿？",
            "question_type": "情景咨询",
            "difficulty": "中等",
            "expected_articles": ["第四十二条", "第八十七条"],
            "standard_answer": "根据《劳动合同法》第四十二条规定，女职工在孕期、产期、哺乳期的，用人单位不得依照本法第四十条、第四十一条的规定解除劳动合同。如果用人单位违法解除劳动合同，根据第八十七条规定，应当依照本法第四十七条规定的经济补偿标准的二倍向劳动者支付赔偿金。",
            "key_requirements": ["孕期", "不得解除", "违法解除", "二倍", "赔偿金", "第四十二条", "第八十七条"],
            "legal_accuracy_weight": 0.6,
            "practicality_weight": 0.3,
            "retrieval_weight": 0.1
        },

        # Comparative analysis - based on Articles 36 and 44 (rescission vs. termination)
        {
            "question_id": "q003",
            "question": "劳动合同的解除和终止有什么区别？",
            "question_type": "比较分析",
            "difficulty": "中等",
            "expected_articles": ["第三十六条", "第四十四条"],
            "standard_answer": "劳动合同解除和终止的区别在于：解除是指劳动合同期限届满前，当事人提前解决劳动关系，包括协商解除（第三十六条）、劳动者单方解除、用人单位解除等情形。终止是指劳动合同期满或者出现法定终止情形时劳动合同自然失效，如第四十四条规定的劳动合同期满、劳动者退休等情形。",
            "key_requirements": ["解除", "终止", "提前", "期满", "协商", "单方", "自然失效"],
            "legal_accuracy_weight": 0.4,
            "practicality_weight": 0.4,
            "retrieval_weight": 0.2
        },

        # Procedural guidance - based on Articles 39 and 40 (dismissal procedure)
        {
            "question_id": "q004",
            "question": "用人单位解除劳动合同需要履行什么程序？",
            "question_type": "程序指导",
            "difficulty": "复杂",
            "expected_articles": ["第三十九条", "第四十条", "第五十条"],
            "standard_answer": "用人单位解除劳动合同的程序包括：1）过失性解除（第三十九条）：可以解除但需要调查取证、听取员工申辩；2）非过失性解除（第四十条）：应当提前三十日以书面形式通知劳动者本人或者额外支付劳动者一个月工资后解除；3）所有解除都必须按第五十条规定出具解除劳动合同的证明，并在十五日内为劳动者办理档案和社会保险关系转移手续。",
            "key_requirements": ["书面通知", "提前三十日", "额外支付", "解除证明", "十五日内", "档案转移"],
            "legal_accuracy_weight": 0.4,
            "practicality_weight": 0.5,
            "retrieval_weight": 0.1
        },

        # Statute lookup - based on Article 47 (severance pay)
        {
            "question_id": "q005",
            "question": "经济补偿金的计算标准是什么？",
            "question_type": "法条查询",
            "difficulty": "中等",
            "expected_articles": ["第四十七条"],
            "standard_answer": "根据《劳动合同法》第四十七条规定，经济补偿按劳动者在本单位工作的年限，每满一年支付一个月工资的标准向劳动者支付。六个月以上不满一年的，按一年计算；不满六个月的，向劳动者支付半个月工资的经济补偿。",
            "key_requirements": ["每满一年", "一个月工资", "六个月以上", "按一年计算", "半个月工资"],
            "legal_accuracy_weight": 0.6,
            "practicality_weight": 0.3,
            "retrieval_weight": 0.1
        },

        # Scenario consultation - based on Article 20 (probation wages)
        {
            "question_id": "q006",
            "question": "试用期期间工资应该如何发放？",
            "question_type": "情景咨询",
            "difficulty": "简单",
            "expected_articles": ["第二十条"],
            "standard_answer": "根据《劳动合同法》第二十条规定，劳动者在试用期的工资不得低于本单位相同岗位最低档工资或者劳动合同约定工资的百分之八十，并不得低于用人单位所在地的最低工资标准。",
            "key_requirements": ["不得低于", "最低档工资", "百分之八十", "最低工资标准"],
            "legal_accuracy_weight": 0.5,
            "practicality_weight": 0.4,
            "retrieval_weight": 0.1
        }
    ]

    print(f"✅ 评估数据集创建完成，共 {len(evaluation_data)} 个问题")
    for item in evaluation_data:
        print(f"   {item['question_id']}: {item['question']} ({item['question_type']})")

    return evaluation_data

def load_real_labor_law_data():
    """
    Load structured Labor Contract Law articles from the source PDF.

    Runs the full pipeline: PDF text extraction -> article splitting ->
    preprocessing -> metadata structuring. Falls back to
    create_mock_labor_law_data() when the PDF is missing or any step fails.

    Returns:
        list: structured article records.
    """

    print("📖 加载真实劳动合同法数据...")

    pdf_path = "/Users/baimu/PycharmProjects/2504A/bossxm/boss/劳动合同法.pdf"

    try:
        # Without the source PDF we cannot run the pipeline at all.
        if not os.path.exists(pdf_path):
            print(f"❌ 未找到劳动合同法PDF文件: {pdf_path}")
            return create_mock_labor_law_data()

        # Pull the raw statute text out of the PDF.
        reader = PDFLawReader(pdf_path)
        reader.open_document()
        raw_text = reader.extract_full_text()

        # Split into individual articles, then clean and structure them.
        from rag_test.rag_compete import split_labor_law_articles
        articles = split_labor_law_articles(raw_text)
        cleaned = preprocess_labor_law(articles)
        structured = structure_labor_law_metadata(cleaned)

        print(f"✅ 成功加载 {len(structured)} 个法条数据")
        return structured

    except Exception as e:
        print(f"❌ 加载劳动合同法数据失败: {e}")
        print("🔄 使用模拟数据替代...")
        return create_mock_labor_law_data()

def create_mock_labor_law_data():
    """
    Provide a small hard-coded sample of Labor Contract Law articles.

    Used as a fallback when the real PDF-derived data cannot be loaded.

    Returns:
        list: three structured article records.
    """

    # Article 19: probation-period limits.
    article_19 = {
        "id": "labor_law_019",
        "content_clean": "劳动合同期限三个月以上不满一年的，试用期不得超过一个月；劳动合同期限一年以上不满三年的，试用期不得超过二个月；三年以上固定期限和无固定期限的劳动合同，试用期不得超过六个月。",
        "law_name": "中华人民共和国劳动合同法",
        "chapter": "第二章 劳动合同的订立",
        "chapter_code": "02",
        "article_num": "第十九条",
        "article_num_code": "019",
        "article_title": "试用期期限",
        "applicable_scenarios": "contract_signing,probation_period",
        "retrieval_weight": 1.0
    }

    # Article 42: situations where dismissal is forbidden (incl. pregnancy).
    article_42 = {
        "id": "labor_law_042",
        "content_clean": "劳动者有下列情形之一的，用人单位不得依照本法第四十条、第四十一条的规定解除劳动合同：（四）女职工在孕期、产期、哺乳期的。",
        "law_name": "中华人民共和国劳动合同法",
        "chapter": "第四章 劳动合同的解除和终止",
        "chapter_code": "04",
        "article_num": "第四十二条",
        "article_num_code": "042",
        "article_title": "不得解除劳动合同的情形",
        "applicable_scenarios": "contract_termination,pregnancy_protection",
        "retrieval_weight": 1.0
    }

    # Article 87: double compensation for unlawful dismissal.
    article_87 = {
        "id": "labor_law_087",
        "content_clean": "用人单位违反本法规定解除或者终止劳动合同的，应当依照本法第四十七条规定的经济补偿标准的二倍向劳动者支付赔偿金。",
        "law_name": "中华人民共和国劳动合同法",
        "chapter": "第七章 法律责任",
        "chapter_code": "07",
        "article_num": "第八十七条",
        "article_num_code": "087",
        "article_title": "违法解除的赔偿",
        "applicable_scenarios": "illegal_termination,compensation",
        "retrieval_weight": 1.0
    }

    mock_data = [article_19, article_42, article_87]
    print(f"✅ 创建模拟法条数据 {len(mock_data)} 条")
    return mock_data

# ===============================================
# 第二部分：检索质量评估函数
# ===============================================

def evaluate_legal_retrieval_accuracy(question, retrieved_articles, expected_articles):
    """
    Score how accurately retrieval found the expected statute articles.

    Args:
        question (str): user question being evaluated.
        retrieved_articles (list): article labels returned by retrieval,
            e.g. ["第十九条", "第二十条"].
        expected_articles (list): gold-standard relevant articles.

    Returns:
        dict: recall / precision / F1, counts, missing and irrelevant
        articles, and a qualitative performance level.
    """

    print(f"🔍 评估检索准确性: {question[:30]}...")

    # Count gold articles that actually appear in the retrieved list.
    hits = sum(1 for article in expected_articles if article in retrieved_articles)

    recall = hits / len(expected_articles) if expected_articles else 0
    precision = hits / len(retrieved_articles) if retrieved_articles else 0
    f1_score = (2 * precision * recall / (precision + recall)) if (precision + recall) > 0 else 0

    # Diagnostics: what was missed, and what was fetched but not needed.
    missing = [art for art in expected_articles if art not in retrieved_articles]
    irrelevant = [art for art in retrieved_articles if art not in expected_articles]

    # Map F1 onto a coarse qualitative grade.
    if f1_score >= 0.9:
        level = "优秀"
    elif f1_score >= 0.7:
        level = "良好"
    elif f1_score >= 0.5:
        level = "一般"
    else:
        level = "需要改进"

    return {
        "question": question,
        "recall": round(recall, 3),
        "precision": round(precision, 3),
        "f1_score": round(f1_score, 3),
        "found_relevant": hits,
        "total_expected": len(expected_articles),
        "total_retrieved": len(retrieved_articles),
        "missing_articles": missing,
        "irrelevant_articles": irrelevant,
        "performance_level": level,
    }

def evaluate_legal_ranking_quality(question, ranked_articles, expected_articles, relevance_scores):
    """
    Score the ranking quality of retrieved articles using NDCG@5.

    Args:
        question (str): user question.
        ranked_articles (list): articles ordered by predicted relevance.
        expected_articles (list): gold relevant articles.
        relevance_scores (dict): graded relevance per article,
            e.g. {"第十九条": 3, "第二十条": 1}.

    Returns:
        dict: DCG / IDCG / NDCG, a top-3 relevance breakdown, and a
        qualitative ranking grade.
    """

    print(f"📊 评估排序质量: {question[:30]}...")

    # DCG over the top 5 ranked results; log2(pos + 2) discounts lower ranks.
    dcg = sum(
        relevance_scores.get(article, 0) / math.log2(pos + 2)
        for pos, article in enumerate(ranked_articles[:5])
        if relevance_scores.get(article, 0) > 0
    )

    # Ideal DCG: the same positive gains sorted best-first.
    gains = sorted((s for s in relevance_scores.values() if s > 0), reverse=True)
    idcg = sum(s / math.log2(pos + 2) for pos, s in enumerate(gains[:5]))

    ndcg = dcg / idcg if idcg > 0 else 0

    # How many gold articles landed in the top 3.
    top3 = ranked_articles[:3]
    relevant_in_top3 = sum(1 for art in top3 if art in expected_articles)

    def _grade(relevance):
        # Translate a graded relevance score into a human-readable label.
        if relevance >= 3:
            return "高度相关"
        if relevance >= 2:
            return "中等相关"
        if relevance >= 1:
            return "弱相关"
        return "不相关"

    top3_analysis = [
        {
            "position": pos + 1,
            "article": article,
            "relevance_score": relevance_scores.get(article, 0),
            "relevance_level": _grade(relevance_scores.get(article, 0)),
        }
        for pos, article in enumerate(top3)
    ]

    # Qualitative grade from NDCG.
    if ndcg >= 0.8:
        quality = "优秀"
    elif ndcg >= 0.6:
        quality = "良好"
    elif ndcg >= 0.4:
        quality = "一般"
    else:
        quality = "需要改进"

    return {
        "question": question,
        "dcg": round(dcg, 3),
        "idcg": round(idcg, 3),
        "ndcg": round(ndcg, 3),
        "relevant_in_top3": relevant_in_top3,
        "total_relevant": len(expected_articles),
        "ranking_quality": quality,
        "top3_analysis": top3_analysis,
    }

def connect_to_real_milvus():
    """
    Connect to the local Milvus server and load the labor-law collection.

    Returns:
        Collection | None: the loaded collection, or None when the server,
        driver, or collection is unavailable (callers then fall back to
        simulated retrieval).
    """

    print("🔌 连接Milvus数据库...")

    collection_name = "labor_contract_law"  # collection holding statute vectors

    try:
        # Import inside try: a missing pymilvus driver is handled like any
        # other connection failure.
        from pymilvus import connections, Collection

        connections.connect("default", host="localhost", port="19530")
        collection = Collection(collection_name)
        collection.load()  # bring vectors into memory so search can run

        print(f"✅ 成功连接到Milvus集合: {collection_name}")
        print(f"📊 集合中向量数量: {collection.num_entities}")
        return collection

    except Exception as e:
        print(f"❌ 连接Milvus失败: {e}")
        print("🔄 将使用模拟检索结果...")
        return None

def search_articles_in_milvus(question, milvus_collection, top_k=5):
    """
    Retrieve the statute articles most relevant to a question from Milvus.

    Args:
        question (str): user question.
        milvus_collection: loaded Milvus collection, or None to force the
            simulated keyword-based retrieval.
        top_k (int): number of hits to request.

    Returns:
        list: retrieved article labels, e.g. ["第十九条", ...].
    """

    # No live collection: fall back to the keyword simulator.
    if milvus_collection is None:
        return simulate_retrieval_results(question)

    try:
        # Embed the question; the embedding helper expects a list of
        # {"vector_text": ...} records and returns one vector per record.
        from rag_test.legal_embeddings import text_embeddings
        query_vector = text_embeddings([{"vector_text": question}])[0]

        # Vector search against the stored statute embeddings.
        results = milvus_collection.search(
            data=[query_vector],
            anns_field="embedding",  # vector field name in the real collection
            param={"metric_type": "L2", "params": {"nprobe": 10}},
            limit=top_k,
            output_fields=["article_num", "content_clean", "article_title", "chapter"]
        )

        # Flatten the hits into a plain list of non-empty article numbers.
        numbers = [
            hit.entity.get('article_num', '')
            for hits in results
            for hit in hits
        ]
        retrieved_articles = [num for num in numbers if num]

        print(f"🔍 Milvus检索结果: {retrieved_articles}")
        return retrieved_articles

    except Exception as e:
        print(f"❌ Milvus搜索失败: {e}")
        return simulate_retrieval_results(question)

def simulate_retrieval_results(question):
    """
    Keyword-based stand-in for vector retrieval, used when Milvus is
    unavailable.

    Args:
        question (str): user question.

    Returns:
        list: simulated article labels for the question.
    """

    print(f"🎭 模拟检索: {question[:30]}...")

    # BUG FIX: the combined "试用期 + 工资" check must run BEFORE the plain
    # "试用期" check. Previously it came after and was unreachable, so
    # probation-wage questions (dataset q006) received the generic
    # probation articles instead of the wage-specific ones.
    if "试用期" in question and "工资" in question:
        return ["第二十条", "第十九条"]
    elif "试用期" in question:
        return ["第十九条", "第二十条", "第二十一条"]
    elif "怀孕" in question or "孕期" in question:
        return ["第四十二条", "第八十七条", "第四十条"]
    elif "解除" in question and "终止" in question:
        return ["第三十六条", "第四十四条", "第三十九条"]
    elif "程序" in question or "解雇" in question:
        return ["第三十九条", "第四十条", "第五十条"]
    elif "经济补偿" in question:
        return ["第四十七条", "第四十六条", "第八十七条"]
    else:
        # No keyword matched: return the opening articles as a neutral result.
        return ["第一条", "第二条", "第三条"]

# ===============================================
# 第三部分：答案质量评估函数
# ===============================================

def evaluate_legal_answer_accuracy(question, generated_answer, standard_answer, expected_articles):
    """
    Score the legal accuracy of a generated answer.

    Blends four sub-scores - article citations, key-information coverage,
    legal terminology, and logical consistency - into a weighted 0-10
    score with a qualitative grade and a detailed breakdown.

    Args:
        question (str): user question.
        generated_answer (str): answer produced by the RAG system.
        standard_answer (str): reference answer.
        expected_articles (list): articles the answer should cite.

    Returns:
        dict: sub-scores, overall score, grade, and detailed analysis.
    """

    print(f"⚖️ 评估答案准确性: {question[:30]}...")

    # Sub-scores, each on a 0-10 scale.
    citation = check_article_citations(generated_answer, expected_articles)
    key_info = check_key_information_completeness(generated_answer, question)
    terminology = check_legal_terminology_accuracy(generated_answer)
    logic = check_logical_consistency(generated_answer, standard_answer)

    # Fixed weights: citations and key information matter most.
    overall = (
        citation * 0.3
        + key_info * 0.3
        + terminology * 0.2
        + logic * 0.2
    )

    # Qualitative grade from the weighted score.
    if overall >= 9:
        level = "优秀"
    elif overall >= 7:
        level = "良好"
    elif overall >= 5:
        level = "一般"
    else:
        level = "需要改进"

    return {
        "question": question,
        "article_citation_score": round(citation, 1),
        "key_information_score": round(key_info, 1),
        "legal_terminology_score": round(terminology, 1),
        "logical_consistency_score": round(logic, 1),
        "overall_accuracy_score": round(overall, 1),
        "accuracy_level": level,
        "detailed_analysis": {
            "cited_articles": extract_cited_articles(generated_answer),
            "missing_key_info": identify_missing_key_info(generated_answer, question),
            "legal_term_issues": identify_legal_term_issues(generated_answer),
        },
    }

def check_article_citations(answer, expected_articles):
    """Score (0-10) how well the answer cites the expected articles (F1-based)."""

    cited = extract_cited_articles(answer)

    # When no citation is required: full marks, minus a little for
    # extraneous citations.
    if not expected_articles:
        return 10 if not cited else 8

    correct = sum(1 for art in expected_articles if art in cited)

    recall = correct / len(expected_articles)
    precision = correct / len(cited) if cited else 0

    # F1 rescaled to the 10-point scale.
    if recall + precision == 0:
        return 0
    return (2 * recall * precision / (recall + precision)) * 10

# Matches statute citations such as "第十九条" or "第一百零二条".
# Compiled once at module scope instead of re-importing `re` (already
# imported at the top of the file) and rebuilding the pattern per call.
_ARTICLE_PATTERN = re.compile(r'第[零一二三四五六七八九十百]+条')

def extract_cited_articles(answer):
    """
    Extract every statute-article citation (e.g. "第十九条") from an answer.

    Args:
        answer (str): answer text to scan.

    Returns:
        list: cited article labels in order of appearance (may repeat).
    """
    return _ARTICLE_PATTERN.findall(answer)

def check_key_information_completeness(answer, question):
    """Score (0-10) how many question-specific key phrases the answer contains."""

    required = get_key_requirements_by_question(question)

    # Question type not recognised: no checklist, return a neutral default.
    if not required:
        return 8

    answer_lower = answer.lower()
    found = sum(1 for phrase in required if phrase.lower() in answer_lower)

    # Coverage ratio rescaled to the 10-point scale.
    return found / len(required) * 10

def get_key_requirements_by_question(question):
    """Return the key phrases a good answer must contain for this question."""

    # Ordered rules: (predicate over the question, required phrases).
    # Order matters - the first matching rule wins, mirroring the original
    # if/elif chain.
    rules = [
        (lambda q: "试用期" in q and "多久" in q,
         ["一个月", "二个月", "六个月", "合同期限"]),
        (lambda q: "怀孕" in q and ("辞退" in q or "解除" in q),
         ["不得解除", "赔偿金", "二倍", "孕期"]),
        (lambda q: "解除" in q and "终止" in q,
         ["解除", "终止", "提前", "期满", "自然"]),
        (lambda q: "程序" in q or "解雇" in q,
         ["书面通知", "提前", "证明", "手续"]),
        (lambda q: "经济补偿" in q,
         ["每满一年", "一个月工资", "六个月"]),
        (lambda q: "试用期" in q and "工资" in q,
         ["不得低于", "百分之八十", "最低工资"]),
    ]

    for matches, phrases in rules:
        if matches(question):
            return phrases
    return []

def check_legal_terminology_accuracy(answer):
    """Score (0-10) the answer's use of precise statutory terminology."""

    # (penalty, condition) pairs; each deducts points when an informal term
    # appears without its formal statutory counterpart.
    deductions = [
        # Mentions contracts informally without the formal term.
        (1, "劳动合同" not in answer and any(w in answer for w in ["合同", "协议"])),
        # "解雇" is colloquial; the statute says "解除".
        (1, "解雇" in answer and "解除" not in answer),
        # Statutes say "劳动者" rather than "员工".
        (0.5, "员工" in answer and "劳动者" not in answer),
        # Statutes say "用人单位" rather than "公司".
        (0.5, "公司" in answer and "用人单位" not in answer),
    ]

    score = 10 - sum(penalty for penalty, hit in deductions if hit)
    return max(0, score)

def check_logical_consistency(generated_answer, standard_answer):
    """Score (0-10) how closely the answer's logical connectives match the reference."""

    # Connectives that signal structured legal reasoning.
    indicators = ["根据", "按照", "如果", "则", "应当", "需要", "因此", "所以"]

    def count(text):
        # Number of distinct indicators present in the text.
        return sum(word in text for word in indicators)

    reference_count = count(standard_answer)
    if reference_count == 0:
        return 8  # reference has no connectives: neutral base score

    # Ratio capped at 1.0 so extra connectives are not rewarded beyond parity.
    ratio = min(count(generated_answer) / reference_count, 1.0)
    return ratio * 10

def identify_missing_key_info(answer, question):
    """List the required key phrases that the answer fails to mention."""
    answer_lower = answer.lower()
    return [
        phrase
        for phrase in get_key_requirements_by_question(question)
        if phrase.lower() not in answer_lower
    ]

def identify_legal_term_issues(answer):
    """List suggestions for replacing informal wording with statutory terms."""

    # (problem predicate, suggestion) pairs, each checked independently.
    checks = [
        ("合同" in answer and "劳动合同" not in answer,
         "建议使用'劳动合同'而不是'合同'"),
        ("解雇" in answer,
         "建议使用'解除劳动合同'而不是'解雇'"),
        ("员工" in answer and "劳动者" not in answer,
         "建议使用'劳动者'而不是'员工'"),
    ]
    return [suggestion for hit, suggestion in checks if hit]

def evaluate_legal_answer_practicality(question, generated_answer, question_type):
    """
    Score how practically useful a generated legal answer is.

    Blends guidance value, clarity, problem-solving completeness, and the
    presence of a legal basis into a weighted 0-10 score, plus concrete
    improvement suggestions.

    Args:
        question (str): user question.
        generated_answer (str): answer produced by the RAG system.
        question_type (str): one of the dataset's question categories.

    Returns:
        dict: sub-scores, overall practicality score, grade, suggestions.
    """

    print(f"🎯 评估答案实用性: {question[:30]}...")

    # Sub-scores, each on a 0-10 scale.
    guidance = check_practical_guidance(generated_answer, question_type)
    clarity = check_answer_clarity(generated_answer)
    completeness = check_problem_solving_completeness(generated_answer, question, question_type)
    legal_basis = check_legal_basis_provision(generated_answer)

    # Weighted blend; actionable guidance carries the most weight.
    overall = (
        guidance * 0.3
        + clarity * 0.25
        + completeness * 0.25
        + legal_basis * 0.2
    )

    suggestions = generate_practicality_suggestions(guidance, clarity, completeness, legal_basis)

    # Qualitative grade from the weighted score.
    if overall >= 8.5:
        level = "非常实用"
    elif overall >= 7:
        level = "比较实用"
    elif overall >= 5.5:
        level = "一般实用"
    else:
        level = "实用性不足"

    return {
        "question": question,
        "question_type": question_type,
        "practical_guidance_score": round(guidance, 1),
        "clarity_score": round(clarity, 1),
        "completeness_score": round(completeness, 1),
        "legal_basis_score": round(legal_basis, 1),
        "overall_practicality_score": round(overall, 1),
        "practicality_level": level,
        "improvement_suggestions": suggestions,
    }

def check_practical_guidance(answer, question_type):
    """Score (0-10) how much actionable guidance the answer offers for its type."""

    # Marker words expected for each question category.
    markers_by_type = {
        "法条查询": ["根据", "规定", "明确规定", "具体标准"],
        "情景咨询": ["应当", "需要", "可以", "建议", "注意", "权利"],
        "比较分析": ["区别", "不同", "差异", "对比", "分别"],
        "程序指导": ["步骤", "流程", "程序", "首先", "其次", "然后", "最后"]
    }

    markers = markers_by_type.get(question_type, [])
    if not markers:
        return 8  # unknown category: neutral default

    hit_ratio = sum(m in answer for m in markers) / len(markers)
    return min(hit_ratio * 10, 10)

def check_answer_clarity(answer):
    """Score (0-10) readability: sentence length, connectives, concreteness."""

    # 1) Average sentence length; overly long sentences hurt readability.
    sentences = [s for s in answer.split('。') if s.strip()]
    if sentences:
        avg_len = sum(len(s) for s in sentences) / len(sentences)
        length_score = 10 if avg_len <= 50 else 8
    else:
        length_score = 5  # no complete sentence at all

    # 2) Discourse connectives signal structured writing.
    connectors = ["首先", "其次", "然后", "最后", "因此", "所以", "但是", "然而"]
    logic_score = 10 if any(c in answer for c in connectors) else 7

    # 3) Concrete figures (any digit) make an answer more actionable.
    specific_score = 10 if any(ch.isdigit() for ch in answer) else 8

    # Simple average of the three sub-scores.
    return (length_score + logic_score + specific_score) / 3

def check_problem_solving_completeness(answer, question, question_type):
    """Score (0-10) whether the answer covers the elements its question type needs."""

    # Required answer elements per question category.
    required_by_type = {
        "法条查询": ["具体条文", "明确标准"],
        "情景咨询": ["法律后果", "应对措施", "权利义务"],
        "比较分析": ["明确区别", "具体差异", "各自特点"],
        "程序指导": ["具体步骤", "操作流程", "注意事项"]
    }

    required = required_by_type.get(question_type, [])
    if not required:
        return 8  # unknown category: neutral default

    # NOTE: each element is whitespace-split into keywords; for these
    # space-free Chinese phrases split() yields the whole phrase, so this
    # is effectively a substring match per element.
    found = sum(
        1 for element in required
        if any(keyword in answer for keyword in element.split())
    )

    return found / len(required) * 10

def check_legal_basis_provision(answer):
    """Score (0-10) whether the answer grounds itself in a legal basis."""

    # Concrete article citations are the strongest form of legal grounding.
    if extract_cited_articles(answer):
        return 10
    # Mentions the statute or "law" generally, but cites nothing specific.
    if "劳动合同法" in answer or "法律" in answer:
        return 7
    # No legal basis at all.
    return 3

def generate_practicality_suggestions(guidance_score, clarity_score, completeness_score, legal_basis_score):
    """Turn weak sub-scores (below 7) into concrete improvement suggestions."""

    # (score, suggestion) for every sub-dimension, in fixed display order.
    thresholds = [
        (guidance_score, "建议增加更多实用的操作指导和具体建议"),
        (clarity_score, "建议使用更清晰简洁的表达方式，避免句子过长"),
        (completeness_score, "建议提供更完整的问题解决方案，包含所有必要要素"),
        (legal_basis_score, "建议增加具体的法条依据，提高答案权威性"),
    ]

    suggestions = [text for score, text in thresholds if score < 7]
    # Every dimension passed: return the single "keep it up" message.
    return suggestions or ["答案实用性良好，建议继续保持专业水准"]

# ===============================================
# 第四部分：端到端评估流程
# ===============================================

def create_mock_rag_system():
    """
    Create a mock legal RAG system for exercising the evaluation pipeline.

    Retrieval uses the real Milvus collection when reachable (falling back
    to keyword simulation otherwise); answer generation returns canned
    answers keyed on question wording. In a real deployment this should be
    replaced by the production RAG system.

    Returns:
        object: a MockLegalRAGSystem instance exposing answer_question().
    """

    class MockLegalRAGSystem:
        def __init__(self):
            # Milvus collection handle, or None when the server is unreachable.
            self.milvus_collection = connect_to_real_milvus()
            # Structured statute data (real PDF-derived, or mock fallback).
            self.legal_data = load_real_labor_law_data()

        def answer_question(self, question):
            """Answer a legal question, mimicking the real system's response shape."""
            import time
            import random

            start_time = time.time()

            # 1. Retrieve the relevant statute articles.
            retrieved_articles = search_articles_in_milvus(question, self.milvus_collection)

            # 2. Generate an answer (simplified canned generation here).
            generated_answer = self.generate_mock_answer(question, retrieved_articles)

            end_time = time.time()
            response_time = end_time - start_time

            return {
                "answer": generated_answer,
                "retrieved_articles": retrieved_articles,
                "response_time": response_time,
                # Random confidence stands in for a real model score.
                "confidence": random.uniform(0.7, 0.95)
            }

        def generate_mock_answer(self, question, retrieved_articles):
            """Return a canned answer chosen by keyword-matching the question."""

            # Canned answers of varying quality, keyed on question wording.
            if "试用期" in question and "多久" in question:
                return "根据《劳动合同法》第十九条规定，试用期的长度取决于劳动合同期限：合同期限三个月以上不满一年的，试用期不得超过一个月；一年以上不满三年的，试用期不得超过二个月；三年以上固定期限和无固定期限的劳动合同，试用期不得超过六个月。"

            elif "怀孕" in question and "辞退" in question:
                return "根据《劳动合同法》第四十二条规定，女职工在孕期、产期、哺乳期的，用人单位不得解除劳动合同。如果用人单位违法解除，根据第八十七条应当支付二倍的经济补偿作为赔偿金。"

            elif "解除" in question and "终止" in question:
                return "劳动合同解除和终止的主要区别是：解除是在合同期限届满前提前结束劳动关系，可以是协商解除、单方解除等；终止是合同期满或出现法定情形时自然结束，如期满、退休等。"

            elif "程序" in question:
                return "用人单位解除劳动合同需要履行以下程序：1）调查取证，确认解除事由；2）听取劳动者申辩；3）提前30日书面通知或支付一个月工资代通知金；4）出具解除证明；5）办理档案和社保转移手续。"

            elif "经济补偿" in question:
                return "根据第四十七条规定，经济补偿按劳动者工作年限计算，每满一年支付一个月工资；六个月以上不满一年的按一年计算；不满六个月的支付半个月工资。"

            elif "试用期" in question and "工资" in question:
                return "根据第二十条规定，试用期工资不得低于本单位相同岗位最低档工资或合同约定工资的80%，且不得低于用人单位所在地最低工资标准。"

            else:
                return f"这是关于{question}的法律回答。根据相关法律规定，需要综合考虑具体情况。"

    return MockLegalRAGSystem()

def run_complete_legal_rag_evaluation(rag_system, evaluation_dataset):
    """
    Run the full evaluation loop over the dataset against a RAG system.

    For each question: query the system, then score retrieval accuracy,
    ranking quality (NDCG), answer accuracy, and practicality; combine
    them into a weighted overall score; finally aggregate everything into
    a comprehensive report.

    Args:
        rag_system: object exposing answer_question(question) -> dict.
        evaluation_dataset (list): items from
            create_real_labor_law_evaluation_dataset().

    Returns:
        dict: the comprehensive evaluation report.
    """

    print("\n" + "="*60)
    print("🚀 开始法律RAG系统完整评估")
    print("="*60)

    all_results = []

    for i, item in enumerate(evaluation_dataset, 1):
        question = item["question"]
        question_type = item["question_type"]
        expected_articles = item["expected_articles"]
        standard_answer = item["standard_answer"]
        key_requirements = item["key_requirements"]

        print(f"\n📋 [{i}/{len(evaluation_dataset)}] 评估问题: {question}")

        # 1. Get the system's response; a failure skips this question
        #    rather than aborting the whole run.
        try:
            system_response = rag_system.answer_question(question)
            retrieved_articles = system_response.get("retrieved_articles", [])
            generated_answer = system_response.get("answer", "")
            response_time = system_response.get("response_time", 0)
            confidence = system_response.get("confidence", 0.5)

            print(f"   💭 生成答案: {generated_answer[:100]}...")
            print(f"   🔍 检索法条: {retrieved_articles}")

        except Exception as e:
            print(f"   ❌ 系统响应失败: {e}")
            continue

        # 2. Retrieval quality (recall / precision / F1).
        retrieval_result = evaluate_legal_retrieval_accuracy(question, retrieved_articles, expected_articles)

        # 3. Simulated relevance grades (a real deployment would use
        #    expert annotations), then ranking quality (NDCG).
        relevance_scores = simulate_relevance_scores(retrieved_articles, expected_articles)
        ranking_result = evaluate_legal_ranking_quality(question, retrieved_articles, expected_articles, relevance_scores)

        # 4. Answer accuracy (citations, key info, terminology, logic).
        accuracy_result = evaluate_legal_answer_accuracy(question, generated_answer, standard_answer, expected_articles)

        # 5. Practicality (guidance, clarity, completeness, legal basis).
        practicality_result = evaluate_legal_answer_practicality(question, generated_answer, question_type)

        # 6. Weighted overall score using the item's per-question weights.
        overall_score = calculate_overall_legal_score(
            retrieval_result, ranking_result, accuracy_result, practicality_result, item
        )

        # 7. Collect this question's full evaluation record.
        question_result = {
            "question_id": item["question_id"],
            "question": question,
            "question_type": question_type,
            "difficulty": item["difficulty"],
            "system_response": {
                "answer": generated_answer,
                "retrieved_articles": retrieved_articles,
                "response_time": response_time,
                "confidence": confidence
            },
            "evaluation_results": {
                "retrieval": retrieval_result,
                "ranking": ranking_result,
                "accuracy": accuracy_result,
                "practicality": practicality_result,
                "overall_score": overall_score
            }
        }

        all_results.append(question_result)

        # Per-question score summary.
        print(f"   📊 检索F1: {retrieval_result['f1_score']:.3f}")
        print(f"   📊 排序NDCG: {ranking_result['ndcg']:.3f}")
        print(f"   📊 准确性: {accuracy_result['overall_accuracy_score']:.1f}/10")
        print(f"   📊 实用性: {practicality_result['overall_practicality_score']:.1f}/10")
        print(f"   📊 综合得分: {overall_score:.1f}/10")

    # 8. Aggregate everything into the final report.
    final_report = generate_comprehensive_evaluation_report(all_results)

    print(f"\n✅ 评估完成! 共评估 {len(all_results)} 个问题")
    return final_report

def simulate_relevance_scores(retrieved_articles, expected_articles):
    """Simulate relevance grades for retrieved articles.

    In a production system these grades would come from expert
    annotation; here they are derived mechanically:
    3 = exact match with an expected article, 2 = partial (substring)
    overlap with one, 1 = weakly relevant.
    """
    graded = {}
    for candidate in retrieved_articles:
        if candidate in expected_articles:
            grade = 3  # exact hit on an expected article
        elif any(exp in candidate or candidate in exp for exp in expected_articles):
            grade = 2  # partial overlap (e.g. sub-clause of an expected article)
        else:
            grade = 1  # retrieved but not expected
        graded[candidate] = grade
    return graded

def calculate_overall_legal_score(retrieval_result, ranking_result, accuracy_result, practicality_result, question_item):
    """Compute the weighted overall score for one legal RAG question.

    Per-question weights are read from the question item itself, falling
    back to 0.2 retrieval / 0.5 accuracy / 0.3 practicality. Retrieval F1
    and ranking NDCG are rescaled to a 10-point scale and averaged into a
    single retrieval component before weighting.

    Args:
        retrieval_result: retrieval evaluation result (has "f1_score").
        ranking_result: ranking evaluation result (has "ndcg").
        accuracy_result: accuracy evaluation result (0-10 scale).
        practicality_result: practicality evaluation result (0-10 scale).
        question_item: question dict carrying the optional weight keys.

    Returns:
        float: weighted overall score on a 0-10 scale.
    """
    w_retrieval = question_item.get("retrieval_weight", 0.2)
    w_accuracy = question_item.get("legal_accuracy_weight", 0.5)
    w_practicality = question_item.get("practicality_weight", 0.3)

    # Average the two retrieval-side metrics after rescaling to 10 points.
    retrieval_component = (retrieval_result["f1_score"] * 10 + ranking_result["ndcg"] * 10) / 2

    return (
        retrieval_component * w_retrieval
        + accuracy_result["overall_accuracy_score"] * w_accuracy
        + practicality_result["overall_practicality_score"] * w_practicality
    )

def generate_comprehensive_evaluation_report(all_results):
    """
    Assemble the final evaluation report from all per-question results.

    Args:
        all_results (list): evaluation result dicts, one per question.

    Returns:
        dict: aggregated report, or an error dict when there are no results.
    """

    if not all_results:
        return {"error": "没有评估结果"}

    total_questions = len(all_results)

    def _avg(values):
        # Arithmetic mean over every evaluated question.
        return sum(values) / total_questions

    def _overall(item):
        # Sort/compare key: a question's overall score.
        return item["evaluation_results"]["overall_score"]

    # Averages of every tracked metric across all questions.
    avg_retrieval_f1 = _avg(r["evaluation_results"]["retrieval"]["f1_score"] for r in all_results)
    avg_ranking_ndcg = _avg(r["evaluation_results"]["ranking"]["ndcg"] for r in all_results)
    avg_accuracy_score = _avg(r["evaluation_results"]["accuracy"]["overall_accuracy_score"] for r in all_results)
    avg_practicality_score = _avg(r["evaluation_results"]["practicality"]["overall_practicality_score"] for r in all_results)
    avg_overall_score = _avg(r["evaluation_results"]["overall_score"] for r in all_results)
    avg_response_time = _avg(r["system_response"]["response_time"] for r in all_results)
    avg_confidence = _avg(r["system_response"]["confidence"] for r in all_results)

    # Best / worst performing questions by overall score.
    best_case = max(all_results, key=_overall)
    worst_case = min(all_results, key=_overall)

    # Bottlenecks feed directly into the improvement suggestions.
    bottlenecks = identify_performance_bottlenecks(all_results)

    return {
        "evaluation_summary": {
            "total_questions": total_questions,
            "evaluation_date": time.strftime("%Y-%m-%d %H:%M:%S"),
            "system_type": "劳动合同法RAG系统",
            "evaluation_version": "v1.0"
        },
        "overall_performance": {
            "average_retrieval_f1": round(avg_retrieval_f1, 3),
            "average_ranking_ndcg": round(avg_ranking_ndcg, 3),
            "average_accuracy_score": round(avg_accuracy_score, 1),
            "average_practicality_score": round(avg_practicality_score, 1),
            "average_overall_score": round(avg_overall_score, 1),
            "average_response_time": round(avg_response_time, 3),
            "average_confidence": round(avg_confidence, 3),
            "performance_level": get_system_performance_level(avg_overall_score)
        },
        "performance_by_question_type": analyze_performance_by_question_type(all_results),
        "performance_by_difficulty": analyze_performance_by_difficulty(all_results),
        "best_performance_case": {
            "question_id": best_case["question_id"],
            "question": best_case["question"],
            "score": round(best_case["evaluation_results"]["overall_score"], 1),
            "strengths": identify_case_strengths(best_case)
        },
        "worst_performance_case": {
            "question_id": worst_case["question_id"],
            "question": worst_case["question"],
            "score": round(worst_case["evaluation_results"]["overall_score"], 1),
            "weaknesses": identify_case_weaknesses(worst_case)
        },
        "performance_bottlenecks": bottlenecks,
        "improvement_suggestions": generate_detailed_improvement_suggestions(all_results, bottlenecks),
        "detailed_results": all_results
    }

def analyze_performance_by_question_type(all_results):
    """Aggregate evaluation metrics grouped by question type.

    Returns a dict keyed by question type with counts, averages (overall,
    retrieval F1, accuracy, practicality), a performance grade, and the
    min-max score range formatted to one decimal place.
    """

    # Group the raw per-question metrics by question type.
    grouped = {}
    for entry in all_results:
        ev = entry["evaluation_results"]
        bucket = grouped.setdefault(entry["question_type"], {
            "overall": [],
            "retrieval": [],
            "accuracy": [],
            "practicality": []
        })
        bucket["overall"].append(ev["overall_score"])
        bucket["retrieval"].append(ev["retrieval"]["f1_score"])
        bucket["accuracy"].append(ev["accuracy"]["overall_accuracy_score"])
        bucket["practicality"].append(ev["practicality"]["overall_practicality_score"])

    # Reduce each group to the reported statistics.
    analysis = {}
    for qtype, bucket in grouped.items():
        count = len(bucket["overall"])
        mean_overall = sum(bucket["overall"]) / count

        analysis[qtype] = {
            "question_count": count,
            "average_overall_score": round(mean_overall, 1),
            "average_retrieval_f1": round(sum(bucket["retrieval"]) / count, 3),
            "average_accuracy_score": round(sum(bucket["accuracy"]) / count, 1),
            "average_practicality_score": round(sum(bucket["practicality"]) / count, 1),
            "performance_level": get_system_performance_level(mean_overall),
            "score_range": f"{min(bucket['overall']):.1f}-{max(bucket['overall']):.1f}"
        }

    return analysis

def analyze_performance_by_difficulty(all_results):
    """Aggregate overall scores grouped by question difficulty.

    Returns a dict keyed by difficulty with the question count, average
    score, performance grade, and min-max score range.
    """

    # Collect overall scores per difficulty level.
    score_groups = {}
    for entry in all_results:
        score_groups.setdefault(entry["difficulty"], []).append(
            entry["evaluation_results"]["overall_score"]
        )

    # Reduce each difficulty group to the reported statistics.
    analysis = {}
    for difficulty, scores in score_groups.items():
        mean_score = sum(scores) / len(scores)

        analysis[difficulty] = {
            "question_count": len(scores),
            "average_score": round(mean_score, 1),
            "performance_level": get_system_performance_level(mean_score),
            "score_range": f"{min(scores):.1f}-{max(scores):.1f}"
        }

    return analysis

def get_system_performance_level(score):
    """Map an overall score (0-10) onto a performance grade label."""
    # Thresholds ordered best-to-worst; the first one met wins.
    for threshold, level in ((8.5, "优秀"), (7.0, "良好"), (5.5, "一般")):
        if score >= threshold:
            return level
    return "需要改进"

def identify_case_strengths(case):
    """List the strong points of a (best-performing) evaluated case."""
    results = case["evaluation_results"]

    # (condition, label) pairs checked in fixed reporting order.
    checks = (
        (results["retrieval"]["f1_score"] >= 0.8, "检索精准"),
        (results["accuracy"]["overall_accuracy_score"] >= 8, "答案准确"),
        (results["practicality"]["overall_practicality_score"] >= 8, "实用性强"),
        (case["system_response"]["response_time"] < 2.0, "响应迅速"),
    )
    return [label for passed, label in checks if passed]

def identify_case_weaknesses(case):
    """List the weak points of a (worst-performing) evaluated case."""
    results = case["evaluation_results"]

    # (condition, label) pairs checked in fixed reporting order.
    checks = (
        (results["retrieval"]["f1_score"] < 0.5, "检索效果差"),
        (results["accuracy"]["overall_accuracy_score"] < 6, "答案准确性不足"),
        (results["practicality"]["overall_practicality_score"] < 6, "实用性不够"),
        (case["system_response"]["response_time"] > 5.0, "响应较慢"),
    )
    return [label for failed, label in checks if failed]

def identify_performance_bottlenecks(all_results):
    """Flag evaluation components whose average score falls below target.

    Thresholds: retrieval F1 < 0.7, accuracy < 7.5/10, practicality < 7.0/10.
    Returns a list of bottleneck dicts in fixed component order.
    """
    n = len(all_results)

    # Average each component metric across all questions.
    avg_retrieval = sum(r["evaluation_results"]["retrieval"]["f1_score"] for r in all_results) / n
    avg_accuracy = sum(r["evaluation_results"]["accuracy"]["overall_accuracy_score"] for r in all_results) / n
    avg_practicality = sum(r["evaluation_results"]["practicality"]["overall_practicality_score"] for r in all_results) / n

    # (below_threshold, component, issue, reported_score, severity)
    checks = (
        (avg_retrieval < 0.7, "检索模块", "法条检索准确率不足", round(avg_retrieval, 3), "高"),
        (avg_accuracy < 7.5, "答案生成模块", "法律答案准确性有待提升", round(avg_accuracy, 1), "高"),
        (avg_practicality < 7.0, "实用性", "答案实用指导价值不够", round(avg_practicality, 1), "中"),
    )
    return [
        {
            "component": component,
            "issue": issue,
            "current_score": score,
            "severity": severity,
        }
        for failing, component, issue, score, severity in checks
        if failing
    ]

def generate_detailed_improvement_suggestions(all_results, bottlenecks):
    """Translate detected bottlenecks into concrete improvement plans.

    One targeted plan is emitted per recognized bottleneck component, in
    the order the bottlenecks were reported, followed by one generic
    continuous-improvement plan that is always included.
    """

    # Targeted plans keyed by the bottleneck component that triggers them.
    plan_catalog = {
        "检索模块": {
            "area": "检索优化",
            "priority": "高",
            "specific_actions": [
                "优化法条向量化方法，使用法律领域专用嵌入模型",
                "增加法律术语同义词扩展，提高查询召回率",
                "调整Milvus检索参数，优化相似度计算方法",
                "改进法条文本预处理，提高语义表示质量"
            ]
        },
        "答案生成模块": {
            "area": "答案质量提升",
            "priority": "高",
            "specific_actions": [
                "设计专业的法律Prompt模板，强调法条引用准确性",
                "建立法条引用验证机制，确保引用正确",
                "加强法律术语使用规范性训练",
                "增加法律专家人工审核环节"
            ]
        },
        "实用性": {
            "area": "实用性改进",
            "priority": "中",
            "specific_actions": [
                "增加具体的操作指导和实践建议",
                "优化答案结构，提高可读性和实用性",
                "收集用户反馈，了解实际需求",
                "增加典型案例说明，增强指导效果"
            ]
        }
    }

    plans = []
    for bottleneck in bottlenecks:
        targeted = plan_catalog.get(bottleneck["component"])
        if targeted is not None:
            plans.append(targeted)

    # Generic plan appended regardless of which bottlenecks were found.
    plans.append({
        "area": "系统持续优化",
        "priority": "中",
        "specific_actions": [
            "建立定期评估机制，持续监控系统性能",
            "收集真实用户使用数据，优化评估数据集",
            "定期更新法条库，确保内容的时效性",
            "建立专家评审机制，提高答案质量"
        ]
    })

    return plans

# ===============================================
# 第五部分：测试和演示函数
# ===============================================

def print_evaluation_report(report):
    """
    Print a formatted evaluation report to stdout.

    Args:
        report (dict): report dict with the keys produced by
            generate_comprehensive_evaluation_report(); missing keys
            raise KeyError.
    """
    
    print("\n" + "="*80)
    print("📋 劳动合同法RAG系统评估报告")
    print("="*80)
    
    # 1. Evaluation summary
    summary = report["evaluation_summary"]
    print(f"\n📊 评估概要:")
    print(f"   评估时间: {summary['evaluation_date']}")
    print(f"   系统类型: {summary['system_type']}")
    print(f"   评估问题数: {summary['total_questions']}")
    print(f"   评估版本: {summary['evaluation_version']}")
    
    # 2. Overall performance metrics
    overall = report["overall_performance"]
    print(f"\n🎯 总体性能:")
    print(f"   平均检索F1分数: {overall['average_retrieval_f1']}")
    print(f"   平均排序NDCG分数: {overall['average_ranking_ndcg']}")
    print(f"   平均准确性得分: {overall['average_accuracy_score']}/10")
    print(f"   平均实用性得分: {overall['average_practicality_score']}/10")
    print(f"   平均综合得分: {overall['average_overall_score']}/10")
    print(f"   平均响应时间: {overall['average_response_time']:.3f}秒")
    print(f"   平均置信度: {overall['average_confidence']:.3f}")
    print(f"   系统性能等级: {overall['performance_level']}")
    
    # 3. Breakdown by question type
    print(f"\n📈 按问题类型分析:")
    for question_type, stats in report["performance_by_question_type"].items():
        print(f"   {question_type}:")
        print(f"      问题数量: {stats['question_count']}")
        print(f"      平均得分: {stats['average_overall_score']}/10")
        print(f"      检索F1: {stats['average_retrieval_f1']}")
        print(f"      准确性: {stats['average_accuracy_score']}/10")
        print(f"      实用性: {stats['average_practicality_score']}/10")
        print(f"      性能等级: {stats['performance_level']}")
        print(f"      得分范围: {stats['score_range']}")
    
    # 4. Breakdown by question difficulty
    print(f"\n📊 按问题难度分析:")
    for difficulty, stats in report["performance_by_difficulty"].items():
        print(f"   {difficulty}:")
        print(f"      问题数量: {stats['question_count']}")
        print(f"      平均得分: {stats['average_score']}/10")
        print(f"      性能等级: {stats['performance_level']}")
        print(f"      得分范围: {stats['score_range']}")
    
    # 5. Best-performing case
    best = report["best_performance_case"]
    print(f"\n🏆 最佳表现案例:")
    print(f"   问题ID: {best['question_id']}")
    print(f"   问题: {best['question']}")
    print(f"   得分: {best['score']}/10")
    print(f"   优势: {', '.join(best['strengths'])}")
    
    # 6. Worst-performing case
    worst = report["worst_performance_case"]
    print(f"\n🔴 最差表现案例:")
    print(f"   问题ID: {worst['question_id']}")
    print(f"   问题: {worst['question']}")
    print(f"   得分: {worst['score']}/10")
    print(f"   弱点: {', '.join(worst['weaknesses'])}")
    
    # 7. Performance bottlenecks
    print(f"\n⚠️ 性能瓶颈:")
    for bottleneck in report["performance_bottlenecks"]:
        print(f"   {bottleneck['component']}: {bottleneck['issue']}")
        print(f"      当前分数: {bottleneck['current_score']}")
        print(f"      严重程度: {bottleneck['severity']}")
    
    # 8. Improvement suggestions
    print(f"\n💡 改进建议:")
    for suggestion in report["improvement_suggestions"]:
        print(f"   {suggestion['area']} (优先级: {suggestion['priority']}):")
        for action in suggestion["specific_actions"]:
            print(f"      - {action}")
    
    print("\n" + "="*80)

def save_evaluation_report(report, filename=None):
    """
    Save the evaluation report as a JSON file.

    Args:
        report (dict): evaluation report to persist.
        filename (str): target file name or path; when None a
            timestamped name is generated automatically. A relative name
            is resolved against the project directory; an absolute path
            is used as-is.
    """
    
    if filename is None:
        timestamp = time.strftime("%Y%m%d_%H%M%S")
        filename = f"labor_law_rag_evaluation_report_{timestamp}.json"
    
    # Bug fix: the previous version ignored `filename` entirely and
    # always wrote to one hard-coded path, so every run clobbered the
    # same file and the timestamped name was never used.
    filepath = os.path.join("/Users/baimu/PycharmProjects/2504A/bossxm/boss", filename)
    
    try:
        with open(filepath, 'w', encoding='utf-8') as f:
            json.dump(report, f, ensure_ascii=False, indent=2)
        
        print(f"\n💾 评估报告已保存到: {filepath}")
        
    except Exception as e:
        # Best-effort save: report the failure but do not abort the run.
        print(f"\n❌ 保存报告失败: {e}")

def run_evaluation_demo():
    """Run the full evaluation demo end to end and return the report.

    Steps: build the evaluation dataset, load the labor-law articles,
    create the (mock) RAG system, run the complete evaluation, then
    print and save the resulting report.
    """
    opening_banner = "🚀" * 20
    print("\n" + opening_banner)
    print("🚀 劳动合同法RAG系统评估演示开始 🚀")
    print(opening_banner)

    # Step 1: build the evaluation dataset.
    print("\n📚 步骤1: 创建评估数据集")
    dataset = create_real_labor_law_evaluation_dataset()

    # Step 2: load the labor-law article data (side effects only here).
    print("\n📖 步骤2: 加载劳动合同法数据")
    load_real_labor_law_data()

    # Step 3: create the RAG system under evaluation.
    print("\n🤖 步骤3: 创建RAG系统")
    rag_system = create_mock_rag_system()

    # Step 4: run the full evaluation pipeline.
    print("\n🔍 步骤4: 运行完整评估流程")
    report = run_complete_legal_rag_evaluation(rag_system, dataset)

    # Step 5: display the report on the console.
    print("\n📋 步骤5: 展示评估报告")
    print_evaluation_report(report)

    # Step 6: persist the report to disk.
    print("\n💾 步骤6: 保存评估报告")
    save_evaluation_report(report)

    closing_banner = "✅" * 20
    print("\n" + closing_banner)
    print("✅ 劳动合同法RAG系统评估演示完成 ✅")
    print(closing_banner)

    return report

# ===============================================
# 主程序入口
# ===============================================

if __name__ == "__main__":
    # Script entry point: run the labor-law RAG evaluation end to end
    # and print a short summary of the final scores.
    try:
        print("📋 更新任务状态...")

        report = run_evaluation_demo()

        print("\n🎉 评估完成!")
        print(f"📊 最终综合得分: {report['overall_performance']['average_overall_score']:.1f}/10")
        print(f"🏅 系统性能等级: {report['overall_performance']['performance_level']}")

    except KeyboardInterrupt:
        # Graceful exit on Ctrl-C.
        print("\n\n⏹️ 用户中断评估")
    except Exception as e:
        # Top-level boundary: report the failure with a full traceback.
        print(f"\n\n❌ 评估过程中出现错误: {e}")
        import traceback
        traceback.print_exc()
    finally:
        print("\n👋 感谢使用劳动合同法RAG评估系统!")
