# -*- coding: utf-8 -*-
"""
相似度推荐器对比工具

本模块提供对不同相似度推荐器的性能对比功能，包括准确性、速度和相似度分布等指标。

支持的推荐器:
- SentenceTransformerRetriever: 基于Sentence Transformer的检索器

作者: QA Retrieval Team
版本: 1.0.0
"""

import time
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import platform
from typing import List, Dict, Any, Tuple
from loguru import logger
import sys
import os
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))

# 导入配置模块（自动配置HuggingFace镜像）
# Import the config module (it auto-configures the HuggingFace mirror as an
# import-time side effect).
try:
    from config import HuggingFaceConfig
    # Nothing else to do: configuration happened when `config` was imported.
except ImportError:
    # Fallback when the config module is unavailable: point HuggingFace at the
    # mirror and keep model downloads in a local `models_cache` directory next
    # to the project root. Must run before sentence_transformers is imported.
    os.environ['HF_ENDPOINT'] = 'https://hf-mirror.com'
    os.environ['TRANSFORMERS_CACHE'] = os.path.join(os.path.dirname(os.path.dirname(os.path.abspath(__file__))), 'models_cache')
    os.environ['SENTENCE_TRANSFORMERS_HOME'] = os.path.join(os.path.dirname(os.path.dirname(os.path.abspath(__file__))), 'models_cache')

from questionretrieval.retrievers.similarity.sentence_transformer_retriever import SentenceTransformerRetriever
from questionretrieval.io.data_loader import load_questions, load_sample_questions, get_test_queries
from data_utils import load_sample_questions_from_json, get_question_pairs, calculate_retrieval_accuracy

# Chinese (CJK) font support for matplotlib figures
def setup_chinese_font():
    """Configure matplotlib font fallbacks so CJK text renders per-OS."""
    try:
        system_name = platform.system()
        if system_name == 'Windows':
            plt.rcParams['font.sans-serif'] = ['SimHei', 'Microsoft YaHei', 'KaiTi', 'FangSong']
        elif system_name == 'Darwin':  # macOS
            plt.rcParams['font.sans-serif'] = ['Arial Unicode MS', 'Heiti TC', 'PingFang SC']
        else:  # Linux and anything else
            plt.rcParams['font.sans-serif'] = ['DejaVu Sans', 'WenQuanYi Micro Hei', 'WenQuanYi Zen Hei']

        # CJK fonts often lack the Unicode minus glyph; fall back to ASCII '-'.
        plt.rcParams['axes.unicode_minus'] = False
        logger.info("✅ 中文字体设置成功")
    except Exception as e:
        logger.warning(f"中文字体设置失败: {e}")

# Apply the font configuration once at import time.
setup_chinese_font()


class SimilarityMethodComparison:
    """
    Comparison harness for similarity-based retrievers.

    Benchmarks every configured retriever on three axes -- retrieval
    accuracy, inference speed, and similarity-score distribution -- and can
    render the results as a publication-style four-panel figure.
    """

    def __init__(self):
        """Initialize an empty harness: no retrievers and no test data yet."""
        self.retrievers = {}      # name -> retriever instance
        self.test_questions = []  # candidate pool indexed into every retriever
        self.test_queries = []    # queries issued by the benchmarks
        self.question_pairs = []  # (query, expected) pairs for accuracy metrics

    def setup_retrievers(self):
        """
        Instantiate the similarity retrievers under comparison.

        Only similarity methods whose score range is bounded within [-1, 1]
        are kept so the comparison panels share a comparable value range.
        """
        logger.info("初始化相似度检索器...")

        # Only cosine is kept: its score range is [-1, 1].
        similarity_methods = ['cosine']

        for method in similarity_methods:
            try:
                retriever = SentenceTransformerRetriever(
                    model_name='paraphrase-multilingual-MiniLM-L12-v2',
                    similarity_method=method
                )
                self.retrievers[f'SentenceTransformer_{method}'] = retriever
                logger.info(f"成功初始化 SentenceTransformer ({method})")
            except Exception as e:
                logger.error(f"初始化 SentenceTransformer ({method}) 失败: {e}")

    def load_test_data(self, use_sample_json: bool = True, questions: List[str] = None, queries: List[str] = None):
        """
        Load candidate questions and queries, then index them into every retriever.

        Args:
            use_sample_json: When True, read candidates/queries/ground-truth
                pairs from sample_questions.json and ignore the other args.
            questions: Candidate question list (used when use_sample_json=False).
            queries: Query list (used when use_sample_json=False). When omitted,
                up to three candidates are sampled (fixed seed) as queries.
        """
        if use_sample_json:
            logger.info("从 sample_questions.json 加载测试数据...")
            candidate_questions, query_questions = load_sample_questions_from_json()
            self.test_questions = candidate_questions
            self.test_queries = query_questions
            # Ground-truth pairs enable hit-rate / MRR accuracy metrics.
            self.question_pairs = get_question_pairs()
        else:
            self.test_questions = questions or []
            if queries is None and questions:
                # Deterministically sample a few candidates to act as queries.
                np.random.seed(42)
                query_indices = np.random.choice(len(questions), min(3, len(questions)), replace=False)
                self.test_queries = [questions[i] for i in query_indices]
            else:
                self.test_queries = queries or []
            self.question_pairs = []

        # Index the candidate pool into every configured retriever.
        for name, retriever in self.retrievers.items():
            try:
                retriever.load_candidates(self.test_questions)
                logger.info(f"为 {name} 加载了 {len(self.test_questions)} 个候选问题")
            except Exception as e:
                logger.error(f"为 {name} 加载候选问题失败: {e}")

    def compare_accuracy(self, top_k: int = 5) -> Dict[str, Any]:
        """
        Compare retrieval accuracy across retrievers.

        When ground-truth pairs are available, hit-rate/MRR metrics are
        computed; otherwise average top-k similarity statistics are reported.

        Args:
            top_k: Cut-off for the Top-K accuracy metrics.

        Returns:
            Mapping of retriever name to its metric dict, or {'error': ...}
            when the measurement failed for that retriever.
        """
        logger.info(f"开始准确性比较 (Top-{top_k})...")

        results = {}

        for name, retriever in self.retrievers.items():
            try:
                if getattr(self, 'question_pairs', None):
                    # Ground truth available: compute hit-rate / MRR metrics.
                    try:
                        metrics = calculate_retrieval_accuracy(
                            retriever, self.question_pairs, top_k=top_k
                        )
                        results[name] = metrics
                    except Exception as e:
                        logger.error(f"{name} 计算准确性指标时出错: {e}")
                        # BUGFIX: key is 'top1_hit_rate' (not 'top_1_hit_rate')
                        # to match what visualize_comparison/_generate_summary read.
                        results[name] = {
                            'top1_hit_rate': 0.0,
                            'top_k_hit_rate': 0.0,
                            'mrr': 0.0,
                            'avg_similarity': 0.0
                        }
                else:
                    # No ground truth: fall back to similarity statistics.
                    similarities = []

                    for query in self.test_queries:
                        recommendations = retriever.recommend(
                            query_question=query,
                            top_k=top_k
                        )

                        if recommendations:
                            # Recommendations are tuples whose LAST element is
                            # the similarity score (e.g. (question, score, sim)).
                            avg_similarity = np.mean([r[-1] for r in recommendations])
                            similarities.append(avg_similarity)

                    if similarities:
                        results[name] = {
                            'avg_similarity': np.mean(similarities),
                            'std_similarity': np.std(similarities),
                            'min_similarity': np.min(similarities),
                            'max_similarity': np.max(similarities)
                        }
                    else:
                        # BUGFIX: previously this case produced no entry at all,
                        # so the retriever silently vanished from the report.
                        results[name] = {'error': 'no recommendations returned'}

                logger.info(f"{name} 准确性指标计算完成")

            except Exception as e:
                logger.error(f"{name} 准确性测试失败: {e}")
                results[name] = {'error': str(e)}

        logger.info("✅ 准确性比较完成")
        return results

    def compare_speed(self, num_queries: int = 3) -> Dict[str, Any]:
        """
        Compare end-to-end inference latency across retrievers.

        Args:
            num_queries: Number of test queries to time (taken from the front
                of self.test_queries).

        Returns:
            Mapping of retriever name to timing statistics in seconds,
            or {'error': ...} when the measurement failed.
        """
        logger.info(f"开始速度对比 (查询数量={num_queries})...")

        test_queries = self.test_queries[:num_queries]
        results = {}

        for name, retriever in self.retrievers.items():
            try:
                times = []

                for query in test_queries:
                    # Wall-clock time of one recommend() call (top-5).
                    start_time = time.time()
                    _ = retriever.recommend(query_question=query, top_k=5)
                    end_time = time.time()
                    times.append(end_time - start_time)

                results[name] = {
                    'avg_time': np.mean(times),
                    'std_time': np.std(times),
                    'min_time': np.min(times),
                    'max_time': np.max(times),
                    'total_time': np.sum(times)
                }

                logger.info(f"{name} 平均推理时间: {results[name]['avg_time']:.3f}秒")

            except Exception as e:
                logger.error(f"{name} 速度测试失败: {e}")
                results[name] = {'error': str(e)}

        return results

    def compare_similarity_distribution(self) -> Dict[str, Any]:
        """
        Compare the distribution of similarity scores across retrievers.

        Runs every test query (top-10) and pools the returned similarity
        scores per retriever.

        Returns:
            Mapping of retriever name to raw scores plus summary statistics
            (mean/std/median/quartiles), or {'error': ...} on failure.
        """
        logger.info("开始相似度分布对比...")

        results = {}

        for name, retriever in self.retrievers.items():
            try:
                all_similarities = []

                for query in self.test_queries:
                    recommendations = retriever.recommend(
                        query_question=query,
                        top_k=10
                    )

                    # Recommendations are tuples; similarity is the last element.
                    similarities = [r[-1] for r in recommendations]
                    all_similarities.extend(similarities)

                if all_similarities:
                    results[name] = {
                        'similarities': all_similarities,
                        'mean': np.mean(all_similarities),
                        'std': np.std(all_similarities),
                        'median': np.median(all_similarities),
                        'q25': np.percentile(all_similarities, 25),
                        'q75': np.percentile(all_similarities, 75)
                    }

            except Exception as e:
                logger.error(f"{name} 相似度分布分析失败: {e}")
                results[name] = {'error': str(e)}

        return results

    def visualize_comparison(self, accuracy_results: Dict, speed_results: Dict,
                           distribution_results: Dict, save_path: str = None):
        """
        Render the comparison results as a four-panel, SCI-paper-style figure.

        Panels: (a) average similarity / top-1 hit rate, (b) average inference
        time, (c) similarity-distribution boxplots, (d) summary statistics.

        Args:
            accuracy_results: Output of compare_accuracy().
            speed_results: Output of compare_speed().
            distribution_results: Output of compare_similarity_distribution().
            save_path: Optional path to save the figure (300 dpi PNG).
        """
        # Journal-style global rc settings (serif font, inward ticks, 300 dpi).
        plt.rcParams.update({
            'font.size': 12,
            'font.family': 'serif',
            'font.serif': ['Times New Roman', 'DejaVu Serif', 'serif'],
            'axes.linewidth': 1.2,
            'axes.spines.top': False,
            'axes.spines.right': False,
            'axes.grid': True,
            'grid.alpha': 0.3,
            'grid.linewidth': 0.5,
            'legend.frameon': False,
            'legend.fontsize': 10,
            'xtick.direction': 'in',
            'ytick.direction': 'in',
            'xtick.major.size': 4,
            'ytick.major.size': 4,
            'figure.dpi': 300
        })

        # Re-apply CJK font fallbacks (rcParams.update above resets the family).
        setup_chinese_font()

        fig, axes = plt.subplots(2, 2, figsize=(12, 10))
        fig.suptitle('Similarity-based Retriever Performance Comparison',
                    fontsize=14, fontweight='bold', y=0.95)

        # Colorblind-friendly palette commonly used in publications.
        sci_colors = ['#2E86AB', '#A23B72', '#F18F01', '#C73E1D', '#592E83', '#1B998B']

        # Panel (a): average similarity per method.
        ax1 = axes[0, 0]
        methods = []
        avg_similarities = []

        for name, result in accuracy_results.items():
            # Support both metric schemas (similarity stats vs. hit-rate metrics).
            if 'avg_similarity' in result:
                methods.append(name.replace('SentenceTransformer_', ''))
                avg_similarities.append(result['avg_similarity'])
            elif 'top1_hit_rate' in result:
                # Fall back to top-1 hit rate when only accuracy metrics exist.
                methods.append(name.replace('SentenceTransformer_', ''))
                avg_similarities.append(result['top1_hit_rate'])

        if methods:
            bars1 = ax1.bar(methods, avg_similarities,
                           color=sci_colors[:len(methods)],
                           edgecolor='black', linewidth=0.8, alpha=0.8)
            ax1.set_title('(a) Average Similarity Comparison',
                         fontweight='bold', fontsize=12, pad=15)
            ax1.set_ylabel('Average Similarity', fontsize=11)
            ax1.tick_params(axis='x', rotation=45, labelsize=10)
            ax1.tick_params(axis='y', labelsize=10)
            ax1.set_ylim(0, max(avg_similarities) * 1.15)

            # Value labels above each bar.
            for bar, val in zip(bars1, avg_similarities):
                ax1.text(bar.get_x() + bar.get_width()/2, bar.get_height() + max(avg_similarities) * 0.02,
                        f'{val:.3f}', ha='center', va='bottom', fontsize=9, fontweight='bold')

        # Panel (b): average inference time per method.
        ax2 = axes[0, 1]
        methods = []
        avg_times = []

        for name, result in speed_results.items():
            if 'avg_time' in result:
                methods.append(name.replace('SentenceTransformer_', ''))
                avg_times.append(result['avg_time'])

        if methods:
            bars2 = ax2.bar(methods, avg_times,
                           color=sci_colors[:len(methods)],
                           edgecolor='black', linewidth=0.8, alpha=0.8)
            ax2.set_title('(b) Average Inference Time Comparison',
                         fontweight='bold', fontsize=12, pad=15)
            ax2.set_ylabel('Average Time (seconds)', fontsize=11)
            ax2.tick_params(axis='x', rotation=45, labelsize=10)
            ax2.tick_params(axis='y', labelsize=10)
            ax2.set_ylim(0, max(avg_times) * 1.15)

            # Value labels above each bar.
            for bar, val in zip(bars2, avg_times):
                ax2.text(bar.get_x() + bar.get_width()/2, bar.get_height() + max(avg_times) * 0.02,
                        f'{val:.3f}', ha='center', va='bottom', fontsize=9, fontweight='bold')

        # Panel (c): similarity-distribution boxplots.
        ax3 = axes[1, 0]
        distribution_data = []
        distribution_labels = []

        for name, result in distribution_results.items():
            if 'similarities' in result:
                distribution_data.append(result['similarities'])
                distribution_labels.append(name.replace('SentenceTransformer_', ''))

        if distribution_data:
            bp = ax3.boxplot(distribution_data, labels=distribution_labels,
                           patch_artist=True, notch=True,
                           boxprops=dict(facecolor='lightblue', alpha=0.7, linewidth=1.2),
                           whiskerprops=dict(linewidth=1.2),
                           capprops=dict(linewidth=1.2),
                           medianprops=dict(color='red', linewidth=2),
                           flierprops=dict(marker='o', markerfacecolor='red', markersize=4, alpha=0.6))

            # Distinct fill color per box to match the bar panels.
            for patch, color in zip(bp['boxes'], sci_colors[:len(distribution_data)]):
                patch.set_facecolor(color)
                patch.set_alpha(0.7)

            ax3.set_title('(c) Similarity Distribution Comparison',
                         fontweight='bold', fontsize=12, pad=15)
            ax3.set_ylabel('Similarity Score', fontsize=11)
            ax3.tick_params(axis='x', rotation=45, labelsize=10)
            ax3.tick_params(axis='y', labelsize=10)

        # Panel (d): grouped bars of mean / std / median per method.
        ax4 = axes[1, 1]
        metrics = ['Mean', 'Std Dev', 'Median']
        x = np.arange(len(metrics))

        # Bar width shrinks with the number of methods so groups fit.
        n_methods = len(distribution_results)
        width = 0.8 / n_methods if n_methods > 0 else 0.15

        for i, (name, result) in enumerate(distribution_results.items()):
            if 'mean' in result:
                values = [result['mean'], result['std'], result['median']]
                label = name.replace('SentenceTransformer_', '')
                ax4.bar(x + i * width - (n_methods - 1) * width / 2, values, width,
                       label=label, color=sci_colors[i % len(sci_colors)],
                       alpha=0.8, edgecolor='black', linewidth=0.8)

        ax4.set_title('(d) Statistical Metrics Comparison',
                     fontweight='bold', fontsize=12, pad=15)
        ax4.set_ylabel('Value', fontsize=11)
        ax4.set_xlabel('Statistical Metrics', fontsize=11)
        ax4.set_xticks(x)
        ax4.set_xticklabels(metrics, fontsize=10)
        ax4.tick_params(axis='y', labelsize=10)
        ax4.legend(fontsize=9, loc='upper right', frameon=True,
                  fancybox=True, shadow=True, framealpha=0.9)

        plt.tight_layout(pad=2.0)

        if save_path:
            plt.savefig(save_path, dpi=300, bbox_inches='tight',
                       facecolor='white', edgecolor='none')
            logger.info(f"Comparison chart saved to: {save_path}")

        plt.show()

    def run_full_comparison(self, questions: List[str], queries: List[str] = None,
                          save_plot: str = None) -> Dict[str, Any]:
        """
        Run the full comparison pipeline on caller-supplied data.

        Args:
            questions: Candidate question list.
            queries: Query list (sampled from questions when omitted).
            save_plot: Optional path to save the comparison figure.

        Returns:
            Dict with keys 'accuracy', 'speed', 'distribution', 'summary'.
        """
        logger.info("开始完整的相似度检索器对比分析...")

        # Set up retrievers and index the caller-supplied data.
        # BUGFIX: the original passed `questions` positionally into the
        # `use_sample_json` parameter, so a non-empty list made load_test_data
        # load sample_questions.json and silently discard the given data.
        self.setup_retrievers()
        self.load_test_data(use_sample_json=False, questions=questions, queries=queries)

        # Run the three benchmarks.
        accuracy_results = self.compare_accuracy()
        speed_results = self.compare_speed()
        distribution_results = self.compare_similarity_distribution()

        # Render the figure.
        self.visualize_comparison(accuracy_results, speed_results,
                                distribution_results, save_plot)

        # Build the textual summary.
        summary = self._generate_summary(accuracy_results, speed_results, distribution_results)

        results = {
            'accuracy': accuracy_results,
            'speed': speed_results,
            'distribution': distribution_results,
            'summary': summary
        }

        logger.info("相似度推荐器对比分析完成")
        return results

    def _generate_summary(self, accuracy_results: Dict, speed_results: Dict,
                         distribution_results: Dict) -> Dict[str, str]:
        """
        Summarize the winners of each benchmark.

        Args:
            accuracy_results: Output of compare_accuracy().
            speed_results: Output of compare_speed().
            distribution_results: Output of compare_similarity_distribution().

        Returns:
            Dict naming the best method per axis plus the winning values
            (formatted as strings). Winner keys are None when no retriever
            produced a usable metric.
        """
        summary = {}

        # Highest accuracy: avg_similarity preferred, top1_hit_rate as fallback.
        best_accuracy = None
        best_accuracy_score = -1
        for name, result in accuracy_results.items():
            score = result.get('avg_similarity', result.get('top1_hit_rate', 0))
            if score > best_accuracy_score:
                best_accuracy_score = score
                best_accuracy = name

        # Lowest average inference time.
        fastest_method = None
        fastest_time = float('inf')
        for name, result in speed_results.items():
            if 'avg_time' in result and result['avg_time'] < fastest_time:
                fastest_time = result['avg_time']
                fastest_method = name

        # Lowest similarity-score standard deviation (= most stable).
        most_stable = None
        lowest_std = float('inf')
        for name, result in distribution_results.items():
            if 'std' in result and result['std'] < lowest_std:
                lowest_std = result['std']
                most_stable = name

        summary['best_accuracy'] = best_accuracy
        summary['fastest_method'] = fastest_method
        summary['most_stable'] = most_stable
        summary['best_accuracy_score'] = f"{best_accuracy_score:.3f}"
        summary['fastest_time'] = f"{fastest_time:.3f}秒"
        summary['lowest_std'] = f"{lowest_std:.3f}"

        return summary


def main():
    """
    Entry point: run the full similarity-method comparison pipeline.

    Loads the sample data set (sample_questions.json), benchmarks accuracy,
    speed and score distribution, saves the comparison figure under
    ./reports, and prints a short winners summary. Any failure is logged
    and re-raised so the process exits non-zero.
    """
    try:
        # Ensure the output directory exists (idempotent, race-free —
        # replaces the exists()/makedirs() check-then-act pair).
        reports_dir = "reports"
        os.makedirs(reports_dir, exist_ok=True)

        comparator = SimilarityMethodComparison()
        comparator.setup_retrievers()

        # Load test data (defaults to sample_questions.json).
        logger.info("加载测试数据...")
        comparator.load_test_data(use_sample_json=True)

        logger.info("\n" + "="*50)
        logger.info("开始相似度方法性能比较")
        logger.info("="*50)

        # 1. Accuracy benchmark.
        accuracy_results = comparator.compare_accuracy(top_k=5)

        # 2. Latency benchmark.
        speed_results = comparator.compare_speed(num_queries=3)

        # 3. Similarity-distribution analysis.
        distribution_results = comparator.compare_similarity_distribution()

        # 4. Figure rendering.
        comparator.visualize_comparison(
            accuracy_results, speed_results, distribution_results,
            os.path.join(reports_dir, "similarity_comparison_results.png")
        )

        # Textual winners summary.
        summary = comparator._generate_summary(accuracy_results, speed_results, distribution_results)

        print("\n=== 对比结果总结 ===")
        print(f"准确性最好的方法: {summary['best_accuracy']}")
        print(f"速度最快的方法: {summary['fastest_method']}")
        print(f"最稳定的方法: {summary['most_stable']}")

        logger.info("\n✅ 相似度方法比较完成！")

    except Exception as e:
        logger.error(f"❌ 运行过程中出现错误: {str(e)}")
        raise


if __name__ == "__main__":
    main()