import os
import json
import sys

# Directory containing this source file.
current_dir = os.path.dirname(os.path.abspath(__file__))
# Absolute path to the evaluation dataset: two directory levels above this file.
EVALUATION_DATASET_PATH = os.path.join(os.path.dirname(os.path.dirname(current_dir)), "evaluation_dataset.json")

def load_evaluation_dataset():
    """
    Load the evaluation dataset from EVALUATION_DATASET_PATH.

    Expected JSON format::

        {
            "query1": ["relevant_doc1", "relevant_doc2", ...],
            "query2": ["relevant_doc3", "relevant_doc4", ...],
            ...
        }

    :return: dict mapping each query to its list of relevant documents;
             an empty dict when the file is missing or cannot be parsed.
    """
    # NOTE: `json` and `os` are already imported at module level (L1-L2);
    # the previous function-local re-imports were redundant and removed.
    print(f"尝试加载评估数据集: {EVALUATION_DATASET_PATH}")
    if os.path.exists(EVALUATION_DATASET_PATH):
        try:
            with open(EVALUATION_DATASET_PATH, 'r', encoding='utf-8') as f:
                data = json.load(f)
                print(f"成功加载评估数据集，包含 {len(data)} 个查询")
                return data
        except Exception as e:
            # Best effort: log and fall back to an empty dataset rather than crash.
            print(f"加载评估数据集失败: {e}")
            return {}
    else:
        print(f"评估数据集文件不存在: {EVALUATION_DATASET_PATH}")
    return {}


# 初始化客户端（与存储时保持一致）
from chromadb import PersistentClient
from elasticsearch import Elasticsearch


# Step 7: vector query (fetch similar chunks from ChromaDB)
def vector_search(question_vec, top_k=8, persist_directory="./chroma_db3", collection_name="simple_rag3", metadata_filter=None) -> list:
    """
    Search ChromaDB for the documents most similar to the question vector.

    :param question_vec: embedding vector of the question
    :param top_k: number of most-similar results to return
    :param persist_directory: ChromaDB persistence directory
    :param collection_name: collection name
    :param metadata_filter: optional metadata filter, e.g.
        {"date": {"$gte": "2023-01-01"}} or {"source": "novel_creation_guide"}
    :return: list of dicts with keys 'id', 'distance', 'document', 'metadata';
             empty list when the search fails.
    """
    # Re-open the persistent client per call (kept consistent with storage side).
    chroma_client = PersistentClient(path=persist_directory)
    chroma_collection = chroma_client.get_or_create_collection(name=collection_name)

    # Chroma expects a list of query embeddings, one inner result list per embedding.
    query_params = {
        "query_embeddings": [question_vec],
        "n_results": top_k
    }
    # Apply the metadata filter only when one was supplied.
    if metadata_filter:
        query_params["where"] = metadata_filter

    try:
        results = chroma_collection.query(**query_params)

        # Unpack each parallel field defensively: previously a missing/None
        # 'metadatas' or 'distances' field caused a KeyError/TypeError that the
        # broad except silently turned into an empty result set.
        ids = (results.get('ids') or [[]])[0]
        distances = (results.get('distances') or [[]])[0]
        documents = (results.get('documents') or [[]])[0]
        metadatas = (results.get('metadatas') or [[]])[0]

        search_results = []
        for i, doc_id in enumerate(ids):
            search_results.append({
                'id': doc_id,
                'distance': distances[i] if i < len(distances) else None,
                'document': documents[i] if i < len(documents) else "",
                'metadata': metadatas[i] if i < len(metadatas) else {}
            })
        return search_results
    except Exception as e:
        print(f"向量搜索过程中发生异常: {e}")
        return []


# Step 8: ES full-text query (keyword matching from Elasticsearch)
def es_search(question, top_k=8, es_hosts=None, index_name="simple_rag_es2") -> list:
    """
    Search Elasticsearch for documents whose keywords match the question.

    :param question: user question text
    :param top_k: number of top-scoring results to return
    :param es_hosts: list of ES host URLs; defaults to ["http://localhost:9200"]
        (parameterized so callers are no longer tied to the hard-coded host)
    :param index_name: name of the index to query (defaults to the previous
        hard-coded index for backward compatibility)
    :return: list of dicts with keys 'id', 'score', 'source'; empty list on failure.
    """
    # Initialize the Elasticsearch client.
    es_client = Elasticsearch(es_hosts if es_hosts else ["http://localhost:9200"])

    # Combine several clauses to improve relevance:
    #   - multi_match over weighted fields (title weighted highest),
    #   - exact phrase match on content (boosted),
    #   - plain match on content as a fallback.
    query = {
        "query": {
            "bool": {
                "should": [
                    {
                        "multi_match": {
                            "query": question,
                            "fields": ["content^1.5", "title^2.0", "author^1.2", "category^1.3"],
                            "type": "best_fields",
                            "tie_breaker": 0.3
                        }
                    },
                    {
                        "match_phrase": {
                            "content": {
                                "query": question,
                                "boost": 2.0
                            }
                        }
                    },
                    {
                        "match": {
                            "content": {
                                "query": question,
                                "boost": 1.0
                            }
                        }
                    }
                ],
                "minimum_should_match": 1
            }
        },
        "size": top_k,
        "min_score": 0.7  # drop very weak matches
    }

    try:
        response = es_client.search(index=index_name, body=query)
        # Flatten each hit into the small dict shape the callers expect.
        return [
            {
                'id': hit['_id'],
                'score': hit['_score'],
                'source': hit['_source']
            }
            for hit in response['hits']['hits']
        ]
    except Exception as e:
        print(f"Elasticsearch搜索过程中发生异常: {e}")
        return []



def calculate_real_metrics(query, retrieved_docs, evaluation_dataset):
    """
    Compute precision / recall / F1 for a retrieval result against gold labels.

    Args:
        query: user query string
        retrieved_docs: list of retrieved document strings
        evaluation_dataset: mapping of query -> list of relevant documents

    Returns:
        dict: precision/recall/f1_score values (duplicated under Chinese keys);
              all values are None plus an explanatory "note" when the metrics
              cannot be computed.
    """
    if not evaluation_dataset:
        # No gold labels at all -> metrics are undefined.
        return {
            "精确率": None,
            "召回率": None,
            "F1分数": None,
            "precision": None,
            "recall": None,
            "f1_score": None,
            "note": "评估数据集缺失，暂无法计算指标"
        }

    # Find the dataset key that best matches the query (exact match or
    # substring containment in either direction).
    matched_query = None
    for candidate in evaluation_dataset.keys():
        if query == candidate or query in candidate or candidate in query:
            matched_query = candidate
            break

    if not matched_query:
        print(f"未找到匹配的查询。查询: '{query}'")
        print("可用的查询键:", list(evaluation_dataset.keys()))
        return {
            "精确率": None,
            "召回率": None,
            "F1分数": None,
            "precision": None,
            "recall": None,
            "f1_score": None,
            "note": "评估数据集中未配置该问题，无法计算指标"
        }

    relevant_docs = evaluation_dataset[matched_query]
    print(f"匹配的查询: '{matched_query}'")
    print(f"相关文档数量: {len(relevant_docs)}")
    print(f"检索到的文档数量: {len(retrieved_docs)}")

    if not relevant_docs:
        # Gold labels exist for the query but list no relevant documents.
        return {
            "精确率": None,
            "召回率": None,
            "F1分数": None,
            "precision": None,
            "recall": None,
            "f1_score": None,
            "note": "评估数据集中该问题的相关文档为空"
        }

    if not retrieved_docs:
        # Nothing retrieved: precision and recall are both 0 by convention.
        return {
            "精确率": 0.0,
            "召回率": 0.0,
            "F1分数": 0.0,
            "precision": 0.0,
            "recall": 0.0,
            "f1_score": 0.0,
            "note": "检索结果为空"
        }

    def _normalize(doc):
        # Collapse newlines and runs of whitespace so purely cosmetic
        # formatting differences do not prevent an exact match.
        # (str.split() with no argument already splits on any whitespace,
        # including newlines, so no explicit replace('\n', ' ') is needed.)
        return " ".join(doc.split())

    retrieved_set = {_normalize(doc) for doc in retrieved_docs}
    relevant_set = {_normalize(doc) for doc in relevant_docs}

    # True positives: documents that are both relevant and retrieved.
    true_positives = len(relevant_set & retrieved_set)
    print(f"真正例数量: {true_positives}")

    # Precision = TP / (TP + FP) = TP / |retrieved|
    precision = true_positives / len(retrieved_set) if retrieved_set else 0.0

    # Recall = TP / (TP + FN) = TP / |relevant|
    # BUG FIX: the denominator must be the size of the *normalized* relevant
    # set, matching the set used to compute true positives. Using the raw
    # list length understated recall whenever normalization collapsed
    # duplicate gold documents.
    recall = true_positives / len(relevant_set) if relevant_set else 0.0

    # Harmonic mean of precision and recall.
    f1_score = 2 * (precision * recall) / (precision + recall) if (precision + recall) > 0 else 0.0

    result = {
        "精确率": round(precision, 3),
        "召回率": round(recall, 3),
        "F1分数": round(f1_score, 3),
        "precision": round(precision, 3),
        "recall": round(recall, 3),
        "f1_score": round(f1_score, 3)
    }

    print(f"评估结果: {result}")
    return result
