#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
数据工具模块
用于加载和处理 sample_questions.json 中的测试数据

作者: QA Retrieval Team
日期: 2024
"""

import json
import os
from typing import Dict, List, Optional, Tuple

import numpy as np
from loguru import logger

def load_sample_questions_from_json(json_path: Optional[str] = None) -> Tuple[List[str], List[str]]:
    """
    Load test data from a sample_questions.json file.

    Args:
        json_path: Path to the JSON file. If None, defaults to
            ``sample_questions.json`` located next to this module.

    Returns:
        Tuple[List[str], List[str]]: (candidate questions taken from each
        entry's ``expected_answer``, query questions taken from ``query``).

    Raises:
        FileNotFoundError: If the JSON file does not exist.
        json.JSONDecodeError: If the file is not valid JSON.
        Exception: Any other error during loading (e.g. KeyError for a
            missing 'expected_answer'/'query' field) is logged and re-raised.
    """
    if json_path is None:
        # Default: sample_questions.json alongside this module.
        current_dir = os.path.dirname(os.path.abspath(__file__))
        json_path = os.path.join(current_dir, 'sample_questions.json')

    try:
        with open(json_path, 'r', encoding='utf-8') as f:
            data = json.load(f)

        # Candidates come from 'expected_answer'; queries from 'query'.
        candidate_questions = [item['expected_answer'] for item in data]
        query_questions = [item['query'] for item in data]

        logger.info(f"成功加载 {len(candidate_questions)} 个候选问题和 {len(query_questions)} 个查询问题")

        return candidate_questions, query_questions

    except FileNotFoundError:
        logger.error(f"找不到文件: {json_path}")
        raise
    except json.JSONDecodeError as e:
        logger.error(f"JSON解析错误: {e}")
        raise
    except Exception as e:
        logger.error(f"加载数据时发生错误: {e}")
        raise

def get_question_pairs(json_path: Optional[str] = None) -> List[Dict[str, str]]:
    """
    Load query/expected-answer pairs used to evaluate retrieval quality.

    Args:
        json_path: Path to the JSON file. If None, defaults to
            ``sample_questions.json`` next to this module (backward
            compatible with the previous zero-argument call).

    Returns:
        List[Dict[str, str]]: List of dicts, each expected to contain
        ``query`` and ``expected_answer`` keys.

    Raises:
        Exception: Any error while opening or parsing the file is logged
            and re-raised.
    """
    if json_path is None:
        current_dir = os.path.dirname(os.path.abspath(__file__))
        json_path = os.path.join(current_dir, 'sample_questions.json')

    try:
        with open(json_path, 'r', encoding='utf-8') as f:
            data = json.load(f)

        logger.info(f"成功加载 {len(data)} 个问题对")
        return data

    except Exception as e:
        logger.error(f"加载问题对时发生错误: {e}")
        raise

def _parse_top_result(retriever, results) -> Tuple[List[str], float]:
    """Extract (retrieved question texts, top-1 similarity) from a raw result list.

    Handles the heterogeneous result formats produced by the different
    retriever implementations; ``results`` must be non-empty.
    """
    first = results[0]

    if isinstance(first, dict):
        # BERT retriever format: [{'question': str, 'similarity': float}, ...]
        return [r['question'] for r in results], first['similarity']

    if isinstance(first, tuple):
        # Tuple-based retriever formats: question text is always element 0.
        questions = [r[0] for r in results]
        if len(first) == 3:
            # FAISS format: (question, similarity_score, original_score) — similarity second.
            # Similarity-retriever format: (question, final_score, similarity) — similarity last.
            # Disambiguate via the retriever's class name.
            if hasattr(retriever, '__class__') and 'faiss' in retriever.__class__.__name__.lower():
                return questions, first[1]
            return questions, first[-1]
        if len(first) >= 5:
            # Cluster retriever: (question, final_score, base_score, multilevel_bonus, similarity).
            return questions, first[-1]
        # Any other tuple width: assume similarity is last; degenerate 1-tuples get 1.0.
        return questions, (first[-1] if len(first) > 1 else 1.0)

    # Unknown format: treat items as plain question strings with full similarity.
    return [str(r) for r in results], 1.0


def calculate_retrieval_accuracy(retriever, question_pairs: List[Dict[str, str]], top_k: int = 5) -> Dict[str, float]:
    """
    Compute retrieval accuracy metrics for a retriever over a set of pairs.

    Args:
        retriever: Retriever instance; must expose either
            ``find_similar_questions(query, top_k=...)`` or
            ``recommend(query, top_k=...)``.
        question_pairs: List of dicts with 'query' and 'expected_answer'.
        top_k: Number of top results to request per query.

    Returns:
        Dict[str, float]: hit_at_1, hit_at_k, mrr, ndcg, avg_similarity,
        total_queries, successful_queries. NOTE: counts (total/successful)
        are ints despite the declared value type.
    """
    total_pairs = len(question_pairs)
    hit_at_1 = 0          # Top-1 hit count
    hit_at_k = 0          # Top-K hit count
    mrr_scores = []       # per-query reciprocal ranks
    similarities = []     # per-query top-1 similarity scores
    ndcg_scores = []      # per-query NDCG@K

    for pair in question_pairs:
        query = pair['query']
        expected = pair['expected_answer']

        try:
            # Dispatch to whichever retrieval API the retriever supports.
            if hasattr(retriever, 'find_similar_questions'):
                results = retriever.find_similar_questions(query, top_k=top_k)
            elif hasattr(retriever, 'recommend'):
                results = retriever.recommend(query, top_k=top_k)
            else:
                logger.warning(f"检索器 {type(retriever).__name__} 没有支持的检索方法")
                continue

            if not results:
                # No results: query contributes to no metric list (matches
                # original behavior — denominators below use list lengths).
                continue

            retrieved_questions, top_similarity = _parse_top_result(retriever, results)

            logger.debug(f"查询: {query}")
            logger.debug(f"期望答案: {expected}")
            logger.debug(f"检索结果: {retrieved_questions[:3]}...")  # first 3 only
            logger.debug(f"结果格式: {type(results[0])}")

            similarities.append(top_similarity)

            # Hit rates, MRR and NDCG against the expected answer.
            if expected in retrieved_questions:
                hit_at_k += 1
                rank = retrieved_questions.index(expected) + 1
                if rank == 1:
                    hit_at_1 += 1
                mrr_scores.append(1.0 / rank)
                # NDCG@K with a single relevant item: IDCG = 1/log2(2) = 1.
                dcg = 1.0 / np.log2(rank + 1)
                idcg = 1.0 / np.log2(2)
                ndcg_scores.append(dcg / idcg)
            else:
                mrr_scores.append(0.0)
                ndcg_scores.append(0.0)

        except Exception as e:
            # A failing query scores zero on every metric rather than aborting.
            logger.warning(f"处理查询 '{query}' 时发生错误: {e}")
            mrr_scores.append(0.0)
            similarities.append(0.0)
            ndcg_scores.append(0.0)

    # Aggregate; guard every division against empty inputs.
    accuracy_metrics = {
        'hit_at_1': hit_at_1 / total_pairs if total_pairs > 0 else 0.0,
        'hit_at_k': hit_at_k / total_pairs if total_pairs > 0 else 0.0,
        'mrr': sum(mrr_scores) / len(mrr_scores) if mrr_scores else 0.0,
        'ndcg': sum(ndcg_scores) / len(ndcg_scores) if ndcg_scores else 0.0,
        'avg_similarity': sum(similarities) / len(similarities) if similarities else 0.0,
        'total_queries': total_pairs,
        'successful_queries': len([s for s in similarities if s > 0])
    }

    logger.info(f"检索器 {type(retriever).__name__} 准确性指标:")
    logger.info(f"  Top-1命中率: {accuracy_metrics['hit_at_1']:.3f} ({hit_at_1}/{total_pairs})")
    logger.info(f"  Top-K命中率: {accuracy_metrics['hit_at_k']:.3f} ({hit_at_k}/{total_pairs})")
    logger.info(f"  MRR: {accuracy_metrics['mrr']:.3f}")
    logger.info(f"  NDCG: {accuracy_metrics['ndcg']:.3f}")
    logger.info(f"  平均相似度: {accuracy_metrics['avg_similarity']:.3f}")

    return accuracy_metrics