# -*- coding: utf-8 -*-
"""
BERT模型对比脚本
比较hfl/chinese-roberta-wwm-ext和bert-base-chinese两个模型的性能

作者: [您的姓名]
日期: 2024
"""

import time
import numpy as np
import platform
from typing import List, Dict, Tuple
from loguru import logger
import matplotlib.pyplot as plt
import seaborn as sns
import sys
import os
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))

# 导入配置模块（自动配置HuggingFace镜像）
try:
    from config import HuggingFaceConfig
    # 配置已在config模块导入时自动完成
except ImportError:
    # 如果无法导入config模块，使用备用配置
    os.environ['HF_ENDPOINT'] = 'https://hf-mirror.com'
    os.environ['TRANSFORMERS_CACHE'] = os.path.join(os.path.dirname(os.path.dirname(os.path.abspath(__file__))), 'models_cache')
    os.environ['SENTENCE_TRANSFORMERS_HOME'] = os.path.join(os.path.dirname(os.path.dirname(os.path.abspath(__file__))), 'models_cache')

from questionretrieval.retrievers.bert.bert_similarity_retriever import BertSimilarityRetriever
from questionretrieval.retrievers.bert.bert_chinese_retriever import BertChineseRetriever
from questionretrieval.io.data_loader import load_questions, load_sample_questions, get_test_queries
from data_utils import load_sample_questions_from_json, get_question_pairs, calculate_retrieval_accuracy

# 设置中文字体支持
def setup_chinese_font():
    """Configure matplotlib so Chinese glyphs render on the current OS."""
    # Per-platform candidate font stacks; matplotlib falls back along the list.
    font_stacks = {
        'Windows': ['SimHei', 'Microsoft YaHei', 'KaiTi', 'FangSong'],
        'Darwin': ['Arial Unicode MS', 'Heiti TC', 'PingFang SC'],  # macOS
    }
    linux_stack = ['DejaVu Sans', 'WenQuanYi Micro Hei', 'WenQuanYi Zen Hei']
    try:
        system_name = platform.system()
        plt.rcParams['font.sans-serif'] = font_stacks.get(system_name, linux_stack)
        # Without this, the Unicode minus sign renders as a missing glyph box.
        plt.rcParams['axes.unicode_minus'] = False
        logger.info("✅ 中文字体设置成功")
    except Exception as e:
        logger.warning(f"中文字体设置失败: {e}")

# Initialize Chinese font support at import time so all later plots render correctly.
setup_chinese_font()


class BertModelComparison:
    """
    Compare two Chinese BERT models on a question-retrieval task.

    Benchmarks hfl/chinese-roberta-wwm-ext against bert-base-chinese on
    retrieval accuracy, inference speed and pairwise-similarity
    distribution, and can render the results as charts and text reports.
    """

    def __init__(self):
        """Create an empty comparator; call setup_models() and load_test_data() before use."""
        self.roberta_retriever = None        # lazily built BertSimilarityRetriever
        self.bert_chinese_retriever = None   # lazily built BertChineseRetriever
        self.test_questions = []             # candidate question pool
        self.test_queries = []               # queries used for evaluation
        self.comparison_results = {}         # reserved for aggregated results

    def setup_models(self):
        """Instantiate both retrievers (loads/downloads model weights)."""
        logger.info("正在初始化BERT模型...")

        logger.info("加载 hfl/chinese-roberta-wwm-ext 模型...")
        self.roberta_retriever = BertSimilarityRetriever("hfl/chinese-roberta-wwm-ext")

        logger.info("加载 bert-base-chinese 模型...")
        self.bert_chinese_retriever = BertChineseRetriever("bert-base-chinese")

        logger.info("✅ 所有模型加载完成")

    def load_test_data(self, use_sample_json: bool = True, questions: List[str] = None, queries: List[str] = None):
        """
        Load test data and feed the candidate pool to both retrievers.

        Args:
            use_sample_json: if True, read candidates/queries from
                sample_questions.json; otherwise use the given lists.
            questions: candidate questions (used when use_sample_json=False).
            queries: test queries (used when use_sample_json=False; defaults
                to the first few candidates when omitted).
        """
        if use_sample_json:
            logger.info("从 sample_questions.json 加载测试数据...")
            candidate_questions, query_questions = load_sample_questions_from_json()
            self.test_questions = candidate_questions
            self.test_queries = query_questions
            # Keep the gold (query, answer) pairs for accuracy metrics.
            self.question_pairs = get_question_pairs()
        else:
            self.test_questions = questions or []
            if queries is None and questions:
                # No explicit queries given: reuse the first few candidates.
                self.test_queries = questions[:min(5, len(questions))]
            else:
                self.test_queries = queries or []
            self.question_pairs = []

        logger.info("为模型加载候选问题...")
        self.roberta_retriever.load_candidates(self.test_questions)
        self.bert_chinese_retriever.load_candidates(self.test_questions)

        logger.info(f"✅ 加载了 {len(self.test_questions)} 个候选问题和 {len(self.test_queries)} 个测试查询")

    def compare_accuracy(self, top_k: int = 5) -> Dict:
        """
        Compare retrieval accuracy of the two models.

        Args:
            top_k: number of results to retrieve per query (Top-K).

        Returns:
            Dict keyed by 'roberta'/'bert_chinese'. When gold question pairs
            are available the values are full accuracy metrics; otherwise
            they hold raw per-query results plus an average similarity.
        """
        logger.info(f"开始准确性比较 (Top-{top_k})...")

        if hasattr(self, 'question_pairs') and self.question_pairs:
            # Gold pairs from sample_questions.json -> proper IR metrics.
            roberta_metrics = calculate_retrieval_accuracy(
                self.roberta_retriever, self.question_pairs, top_k=top_k
            )
            bert_chinese_metrics = calculate_retrieval_accuracy(
                self.bert_chinese_retriever, self.question_pairs, top_k=top_k
            )

            results = {
                'roberta': roberta_metrics,
                'bert_chinese': bert_chinese_metrics
            }
        else:
            # No gold labels -> fall back to comparing raw similarity scores.
            roberta_results = {}
            bert_chinese_results = {}

            for query in self.test_queries:
                roberta_results[query] = self.roberta_retriever.find_similar_questions(
                    query, top_k=top_k
                )
                bert_chinese_results[query] = self.bert_chinese_retriever.find_similar_questions(
                    query, top_k=top_k
                )

            roberta_avg_sim = self._calculate_average_similarity(roberta_results)
            bert_chinese_avg_sim = self._calculate_average_similarity(bert_chinese_results)

            results = {
                'roberta': {
                    'results': roberta_results,
                    'avg_similarity': roberta_avg_sim
                },
                'bert_chinese': {
                    'results': bert_chinese_results,
                    'avg_similarity': bert_chinese_avg_sim
                }
            }

        logger.info("✅ 准确性比较完成")
        return results

    def compare_speed(self, num_queries: int = 10) -> Dict:
        """
        Compare end-to-end retrieval latency of the two models.

        Args:
            num_queries: use at most this many queries from self.test_queries.

        Returns:
            Dict with total and per-query wall-clock times per model.

        Raises:
            ValueError: if no test queries have been loaded.
        """
        logger.info("开始速度对比...")

        test_queries = self.test_queries[:num_queries]
        if not test_queries:
            # Guards the divisions below against ZeroDivisionError.
            raise ValueError("No test queries available; call load_test_data() first")

        # Time the RoBERTa model.
        start_time = time.time()
        for query in test_queries:
            self.roberta_retriever.find_similar_questions(query, top_k=5)
        roberta_time = time.time() - start_time

        # Time the BERT-Chinese model.
        start_time = time.time()
        for query in test_queries:
            self.bert_chinese_retriever.find_similar_questions(query, top_k=5)
        bert_chinese_time = time.time() - start_time

        speed_comparison = {
            'roberta': {
                'total_time': roberta_time,
                'avg_time_per_query': roberta_time / len(test_queries)
            },
            'bert_chinese': {
                'total_time': bert_chinese_time,
                'avg_time_per_query': bert_chinese_time / len(test_queries)
            }
        }

        logger.info(f"RoBERTa总时间: {roberta_time:.2f}s, 平均每查询: {roberta_time/len(test_queries):.3f}s")
        logger.info(f"BERT中文总时间: {bert_chinese_time:.2f}s, 平均每查询: {bert_chinese_time/len(test_queries):.3f}s")

        return speed_comparison

    def compare_similarity_distribution(self) -> Dict:
        """
        Compare the pairwise-similarity distributions of the two models.

        Returns:
            Dict with the raw similarity values and summary statistics
            (mean/std/min/max) for each model.
        """
        logger.info("开始相似度分布对比...")

        roberta_matrix = self.roberta_retriever.get_similarity_matrix()
        bert_chinese_matrix = self.bert_chinese_retriever.get_similarity_matrix()

        # Upper triangle excluding the diagonal: each unordered pair once,
        # and no trivial self-similarities.
        roberta_similarities = roberta_matrix[np.triu_indices_from(roberta_matrix, k=1)]
        bert_chinese_similarities = bert_chinese_matrix[np.triu_indices_from(bert_chinese_matrix, k=1)]

        distribution_comparison = {
            'roberta': {
                'similarities': roberta_similarities,
                'mean': np.mean(roberta_similarities),
                'std': np.std(roberta_similarities),
                'min': np.min(roberta_similarities),
                'max': np.max(roberta_similarities)
            },
            'bert_chinese': {
                'similarities': bert_chinese_similarities,
                'mean': np.mean(bert_chinese_similarities),
                'std': np.std(bert_chinese_similarities),
                'min': np.min(bert_chinese_similarities),
                'max': np.max(bert_chinese_similarities)
            }
        }

        logger.info(f"RoBERTa相似度 - 均值: {distribution_comparison['roberta']['mean']:.4f}, 标准差: {distribution_comparison['roberta']['std']:.4f}")
        logger.info(f"BERT中文相似度 - 均值: {distribution_comparison['bert_chinese']['mean']:.4f}, 标准差: {distribution_comparison['bert_chinese']['std']:.4f}")

        return distribution_comparison

    def _calculate_average_similarity(self, results: Dict) -> float:
        """
        Average the top-1 similarity across all queries.

        Args:
            results: mapping of query -> ranked result list, where each
                result is a dict with a 'similarity' key.

        Returns:
            Mean of the best similarity per query, or 0 when empty.
        """
        similarities = [
            query_results[0]['similarity']
            for query_results in results.values()
            if query_results
        ]
        # float() keeps the return a plain Python float, not np.float64.
        return float(np.mean(similarities)) if similarities else 0

    def generate_detailed_similarity_report(self, save_path: str = None) -> Dict:
        """
        Collect per-query similarity statistics and write a text report.

        Args:
            save_path: report path; defaults to reports/bert_detailed_similarity_report.txt.

        Returns:
            Dict with per-query stats for each model and a head-to-head
            comparison entry per query.
        """
        logger.info("开始生成详细相似度报告...")

        if not save_path:
            save_path = os.path.join("reports", "bert_detailed_similarity_report.txt")

        # Make sure the reports directory exists before writing.
        os.makedirs(os.path.dirname(save_path), exist_ok=True)

        detailed_results = {
            'roberta': {},
            'bert_chinese': {},
            'comparison': {}
        }

        for query in self.test_queries:
            roberta_results = self.roberta_retriever.find_similar_questions(query, top_k=5)
            bert_chinese_results = self.bert_chinese_retriever.find_similar_questions(query, top_k=5)

            roberta_similarities = [r['similarity'] for r in roberta_results] if roberta_results else []
            bert_chinese_similarities = [r['similarity'] for r in bert_chinese_results] if bert_chinese_results else []

            detailed_results['roberta'][query] = {
                'similarities': roberta_similarities,
                'max_similarity': max(roberta_similarities) if roberta_similarities else 0,
                'avg_similarity': np.mean(roberta_similarities) if roberta_similarities else 0,
                'results': roberta_results
            }

            detailed_results['bert_chinese'][query] = {
                'similarities': bert_chinese_similarities,
                'max_similarity': max(bert_chinese_similarities) if bert_chinese_similarities else 0,
                'avg_similarity': np.mean(bert_chinese_similarities) if bert_chinese_similarities else 0,
                'results': bert_chinese_results
            }

            # Head-to-head on this query: higher best-match similarity wins.
            roberta_max = max(roberta_similarities) if roberta_similarities else 0
            bert_chinese_max = max(bert_chinese_similarities) if bert_chinese_similarities else 0

            detailed_results['comparison'][query] = {
                'roberta_better': roberta_max > bert_chinese_max,
                'difference': roberta_max - bert_chinese_max,
                'roberta_max': roberta_max,
                'bert_chinese_max': bert_chinese_max
            }

        self._write_similarity_report(detailed_results, save_path)

        logger.info(f"✅ 详细相似度报告已保存到: {save_path}")
        return detailed_results

    def _write_similarity_report(self, results: Dict, save_path: str):
        """
        Write the detailed similarity statistics to a UTF-8 text file.

        Args:
            results: output of generate_detailed_similarity_report().
            save_path: destination file path.
        """
        with open(save_path, 'w', encoding='utf-8') as f:
            f.write("BERT模型相似度详细报告\n")
            f.write("=" * 50 + "\n\n")

            # Aggregate over all queries for the overall-statistics section.
            roberta_all_similarities = []
            bert_chinese_all_similarities = []
            roberta_wins = 0
            bert_chinese_wins = 0

            for query in self.test_queries:
                roberta_data = results['roberta'][query]
                bert_chinese_data = results['bert_chinese'][query]
                comparison_data = results['comparison'][query]

                roberta_all_similarities.extend(roberta_data['similarities'])
                bert_chinese_all_similarities.extend(bert_chinese_data['similarities'])

                if comparison_data['roberta_better']:
                    roberta_wins += 1
                else:
                    bert_chinese_wins += 1

            f.write("总体统计:\n")
            f.write(f"RoBERTa模型平均相似度: {np.mean(roberta_all_similarities):.4f}\n")
            f.write(f"BERT中文模型平均相似度: {np.mean(bert_chinese_all_similarities):.4f}\n")
            f.write(f"RoBERTa模型胜出次数: {roberta_wins}/{len(self.test_queries)}\n")
            f.write(f"BERT中文模型胜出次数: {bert_chinese_wins}/{len(self.test_queries)}\n\n")

            # Per-query detail section.
            f.write("每个查询的详细结果:\n")
            f.write("-" * 50 + "\n")

            for i, query in enumerate(self.test_queries, 1):
                roberta_data = results['roberta'][query]
                bert_chinese_data = results['bert_chinese'][query]
                comparison_data = results['comparison'][query]

                f.write(f"\n{i}. 查询: {query}\n")
                f.write(f"   RoBERTa最高相似度: {roberta_data['max_similarity']:.4f}\n")
                f.write(f"   BERT中文最高相似度: {bert_chinese_data['max_similarity']:.4f}\n")
                f.write(f"   差值: {comparison_data['difference']:.4f}\n")
                f.write(f"   胜出模型: {'RoBERTa' if comparison_data['roberta_better'] else 'BERT中文'}\n")

                # Show the top-3 recommendations for each model.
                f.write(f"   RoBERTa前3推荐:\n")
                for j, result in enumerate(roberta_data['results'][:3], 1):
                    f.write(f"     {j}. {result['question']} (相似度: {result['similarity']:.4f})\n")

                f.write(f"   BERT中文前3推荐:\n")
                for j, result in enumerate(bert_chinese_data['results'][:3], 1):
                    f.write(f"     {j}. {result['question']} (相似度: {result['similarity']:.4f})\n")

    def plot_comparison_results(self, accuracy_results: Dict, speed_results: Dict, 
                              distribution_results: Dict, save_path: str = None):
        """
        Plot comparison results with SCI paper style.

        Args:
            accuracy_results: output of compare_accuracy().
            speed_results: output of compare_speed().
            distribution_results: output of compare_similarity_distribution().
            save_path: optional PNG path; the figure is shown either way.
        """
        # SCI paper style settings.
        plt.rcParams.update({
            'font.family': 'serif',
            'font.serif': ['Times New Roman'],
            'font.size': 10,
            'axes.linewidth': 1.2,
            'axes.spines.top': False,
            'axes.spines.right': False,
            'axes.grid': True,
            'grid.alpha': 0.3,
            'grid.linewidth': 0.5,
            'legend.frameon': False,
            'legend.fontsize': 9,
            'xtick.direction': 'in',
            'ytick.direction': 'in',
            'xtick.major.size': 4,
            'ytick.major.size': 4
        })

        # SCI color scheme.
        sci_colors = ['#2E86AB', '#A23B72', '#F18F01', '#C73E1D']

        fig, axes = plt.subplots(2, 2, figsize=(12, 10))
        fig.suptitle('BERT Model Comparison Results', fontsize=14, fontweight='bold', y=0.95)

        # 1. Average similarity comparison.
        models = ['RoBERTa', 'BERT Chinese']
        # .get() keeps this compatible with both accuracy result structures.
        avg_similarities = [
            accuracy_results['roberta'].get('avg_similarity', 0),
            accuracy_results['bert_chinese'].get('avg_similarity', 0)
        ]

        bars = axes[0, 0].bar(models, avg_similarities, color=sci_colors[:2], 
                             alpha=0.8, edgecolor='black', linewidth=0.8)
        axes[0, 0].set_title('Average Similarity Comparison', fontsize=12, fontweight='bold', pad=15)
        axes[0, 0].set_ylabel('Average Similarity', fontsize=11)
        axes[0, 0].set_ylim(0, max(avg_similarities) * 1.2 if max(avg_similarities) > 0 else 1)
        axes[0, 0].tick_params(axis='both', which='major', labelsize=10)

        # Value labels on bars.
        for bar, v in zip(bars, avg_similarities):
            height = bar.get_height()
            axes[0, 0].text(bar.get_x() + bar.get_width()/2., height + max(avg_similarities) * 0.02,
                           f'{v:.4f}', ha='center', va='bottom', fontsize=9, fontweight='bold')

        # 2. Inference speed comparison.
        avg_times = [
            speed_results['roberta']['avg_time_per_query'],
            speed_results['bert_chinese']['avg_time_per_query']
        ]

        bars = axes[0, 1].bar(models, avg_times, color=sci_colors[:2], 
                             alpha=0.8, edgecolor='black', linewidth=0.8)
        axes[0, 1].set_title('Average Inference Time Comparison', fontsize=12, fontweight='bold', pad=15)
        axes[0, 1].set_ylabel('Time (seconds)', fontsize=11)
        axes[0, 1].set_ylim(0, max(avg_times) * 1.2 if max(avg_times) > 0 else 1)
        axes[0, 1].tick_params(axis='both', which='major', labelsize=10)

        # Value labels on bars.
        for bar, v in zip(bars, avg_times):
            height = bar.get_height()
            axes[0, 1].text(bar.get_x() + bar.get_width()/2., height + max(avg_times) * 0.02,
                           f'{v:.3f}s', ha='center', va='bottom', fontsize=9, fontweight='bold')

        # 3. Accuracy comparison: full IR metrics when available, otherwise
        # only the average similarity.
        has_accuracy_metrics = 'hit_at_1' in accuracy_results['roberta']

        if has_accuracy_metrics:
            accuracy_metrics = ['Top-1 Hit Rate', 'Top-K Hit Rate', 'MRR', 'NDCG', 'Avg Similarity']
            roberta_accuracy = [
                accuracy_results['roberta'].get('hit_at_1', 0),
                accuracy_results['roberta'].get('hit_at_k', 0),
                accuracy_results['roberta'].get('mrr', 0),
                accuracy_results['roberta'].get('ndcg', 0),
                accuracy_results['roberta'].get('avg_similarity', 0)
            ]
            bert_chinese_accuracy = [
                accuracy_results['bert_chinese'].get('hit_at_1', 0),
                accuracy_results['bert_chinese'].get('hit_at_k', 0),
                accuracy_results['bert_chinese'].get('mrr', 0),
                accuracy_results['bert_chinese'].get('ndcg', 0),
                accuracy_results['bert_chinese'].get('avg_similarity', 0)
            ]
        else:
            accuracy_metrics = ['Avg Similarity']
            roberta_accuracy = [accuracy_results['roberta'].get('avg_similarity', 0)]
            bert_chinese_accuracy = [accuracy_results['bert_chinese'].get('avg_similarity', 0)]

        x_acc = np.arange(len(accuracy_metrics))
        width_acc = 0.35

        bars1 = axes[1, 0].bar(x_acc - width_acc/2, roberta_accuracy, width_acc, 
                              label='RoBERTa', color=sci_colors[0], alpha=0.8, 
                              edgecolor='black', linewidth=0.8)
        bars2 = axes[1, 0].bar(x_acc + width_acc/2, bert_chinese_accuracy, width_acc, 
                              label='BERT Chinese', color=sci_colors[1], alpha=0.8, 
                              edgecolor='black', linewidth=0.8)
        axes[1, 0].set_title('Accuracy Comparison', fontsize=12, fontweight='bold', pad=15)
        axes[1, 0].set_xlabel('Accuracy Metrics', fontsize=11)
        axes[1, 0].set_ylabel('Value', fontsize=11)
        axes[1, 0].set_xticks(x_acc)
        axes[1, 0].set_xticklabels(accuracy_metrics, rotation=45, ha='right', fontsize=9)
        axes[1, 0].tick_params(axis='both', which='major', labelsize=10)
        axes[1, 0].legend(loc='upper right', fontsize=9)

        # y-axis range across both models' metric values.
        all_values = roberta_accuracy + bert_chinese_accuracy
        max_val = max([v for v in all_values if v > 0]) if any(v > 0 for v in all_values) else 1
        axes[1, 0].set_ylim(0, max_val * 1.2)

        # Value labels on bars (skip zero-height bars).
        for bar1, bar2 in zip(bars1, bars2):
            height1 = bar1.get_height()
            height2 = bar2.get_height()
            if height1 > 0:
                axes[1, 0].text(bar1.get_x() + bar1.get_width()/2., height1 + max_val * 0.01,
                               f'{height1:.3f}', ha='center', va='bottom', fontsize=9, fontweight='bold')
            if height2 > 0:
                axes[1, 0].text(bar2.get_x() + bar2.get_width()/2., height2 + max_val * 0.01,
                               f'{height2:.3f}', ha='center', va='bottom', fontsize=9, fontweight='bold')

        # 4. Statistical metrics comparison.
        metrics = ['Mean', 'Std Dev', 'Min', 'Max']
        roberta_stats = [
            distribution_results['roberta']['mean'],
            distribution_results['roberta']['std'],
            distribution_results['roberta']['min'],
            distribution_results['roberta']['max']
        ]
        bert_chinese_stats = [
            distribution_results['bert_chinese']['mean'],
            distribution_results['bert_chinese']['std'],
            distribution_results['bert_chinese']['min'],
            distribution_results['bert_chinese']['max']
        ]

        x = np.arange(len(metrics))
        width = 0.35

        bars1 = axes[1, 1].bar(x - width/2, roberta_stats, width, 
                              label='RoBERTa', color=sci_colors[0], alpha=0.8, 
                              edgecolor='black', linewidth=0.8)
        bars2 = axes[1, 1].bar(x + width/2, bert_chinese_stats, width, 
                              label='BERT Chinese', color=sci_colors[1], alpha=0.8, 
                              edgecolor='black', linewidth=0.8)
        axes[1, 1].set_title('Similarity Statistical Metrics Comparison', fontsize=12, fontweight='bold', pad=15)
        axes[1, 1].set_xlabel('Statistical Metrics', fontsize=11)
        axes[1, 1].set_ylabel('Value', fontsize=11)
        axes[1, 1].set_xticks(x)
        axes[1, 1].set_xticklabels(metrics, fontsize=10)
        axes[1, 1].tick_params(axis='both', which='major', labelsize=10)
        axes[1, 1].legend(loc='upper right', fontsize=9)

        # y-axis range across both models' statistics.
        all_stats = roberta_stats + bert_chinese_stats
        max_stat = max([v for v in all_stats if v > 0]) if any(v > 0 for v in all_stats) else 1
        axes[1, 1].set_ylim(0, max_stat * 1.2)

        # Value labels on bars.
        for bar1, bar2 in zip(bars1, bars2):
            height1 = bar1.get_height()
            height2 = bar2.get_height()
            axes[1, 1].text(bar1.get_x() + bar1.get_width()/2., height1 + max_stat * 0.02,
                           f'{height1:.3f}', ha='center', va='bottom', fontsize=9, fontweight='bold')
            axes[1, 1].text(bar2.get_x() + bar2.get_width()/2., height2 + max_stat * 0.02,
                           f'{height2:.3f}', ha='center', va='bottom', fontsize=9, fontweight='bold')

        plt.tight_layout(pad=2.0)

        if save_path:
            plt.savefig(save_path, dpi=300, bbox_inches='tight', 
                       facecolor='white', edgecolor='none', format='png')
            logger.info(f"Comparison chart saved to: {save_path}")

        plt.show()

    def run_full_comparison(self, questions: List[str], queries: List[str] = None, 
                          save_plot: str = None) -> Dict:
        """
        Run the complete model comparison on caller-supplied data.

        Args:
            questions: candidate question list.
            queries: test query list (defaults to a slice of `questions`).
            save_plot: optional path for the comparison chart.

        Returns:
            Dict with 'accuracy', 'speed', 'distribution' and 'summary' keys.
        """
        logger.info("开始完整的BERT模型对比...")

        self.setup_models()
        # BUG FIX: the original call passed `questions` positionally into the
        # boolean `use_sample_json` parameter, which silently discarded the
        # caller's data and loaded sample_questions.json instead.
        self.load_test_data(use_sample_json=False, questions=questions, queries=queries)

        # Run all three comparisons.
        accuracy_results = self.compare_accuracy()
        speed_results = self.compare_speed()
        distribution_results = self.compare_similarity_distribution()

        # Render the comparison chart.
        self.plot_comparison_results(accuracy_results, speed_results, 
                                   distribution_results, save_plot)

        # Summarize the winners; .get() tolerates both accuracy structures.
        roberta_avg = accuracy_results['roberta'].get('avg_similarity', 0)
        bert_chinese_avg = accuracy_results['bert_chinese'].get('avg_similarity', 0)
        full_results = {
            'accuracy': accuracy_results,
            'speed': speed_results,
            'distribution': distribution_results,
            'summary': {
                'better_accuracy': 'roberta' if roberta_avg > bert_chinese_avg else 'bert_chinese',
                'faster_model': 'roberta' if speed_results['roberta']['avg_time_per_query'] < speed_results['bert_chinese']['avg_time_per_query'] else 'bert_chinese'
            }
        }

        logger.info("✅ 完整对比完成")
        return full_results


def _print_retrieval_results(results):
    """Print a ranked retrieval result list with similarity scores."""
    for i, result in enumerate(results, 1):
        print(f"  {i}. {result['question']}")
        print(f"     相似度: {result['similarity']:.3f}")


def main():
    """
    Entry point: run the full RoBERTa vs BERT-Chinese comparison and write
    the chart and detailed report under the project-level `reports` folder.
    """
    try:
        comparator = BertModelComparison()

        comparator.setup_models()

        # Load evaluation data (defaults to sample_questions.json).
        logger.info("加载测试数据...")
        comparator.load_test_data(use_sample_json=True)

        logger.info("\n" + "="*50)
        logger.info("开始BERT模型性能比较")
        logger.info("="*50)

        # 1. Accuracy comparison.
        accuracy_results = comparator.compare_accuracy(top_k=5)

        # 2. Speed comparison.
        speed_results = comparator.compare_speed(num_queries=10)

        # 3. Similarity-distribution comparison.
        distribution_results = comparator.compare_similarity_distribution()

        # 4. Visualization. `os` is already imported at module level; the
        # original re-imported it here redundantly.
        reports_dir = os.path.join(os.path.dirname(os.path.dirname(os.path.abspath(__file__))), 'reports')
        os.makedirs(reports_dir, exist_ok=True)
        save_path = os.path.join(reports_dir, "bert_comparison_results.png")
        comparator.plot_comparison_results(accuracy_results, speed_results, distribution_results, save_path=save_path)

        # 5. Detailed similarity report.
        logger.info("生成详细相似度报告...")
        detailed_report_path = os.path.join(reports_dir, "bert_detailed_similarity_report.txt")
        comparator.generate_detailed_similarity_report(save_path=detailed_report_path)

        # Print the overall summary.
        print("\n=== BERT模型对比总结 ===")
        if 'avg_similarity' in accuracy_results['roberta']:
            better_accuracy = 'roberta' if accuracy_results['roberta']['avg_similarity'] > accuracy_results['bert_chinese']['avg_similarity'] else 'bert_chinese'
            print(f"准确性更好的模型: {better_accuracy}")

        faster_model = 'roberta' if speed_results['roberta']['avg_time_per_query'] < speed_results['bert_chinese']['avg_time_per_query'] else 'bert_chinese'
        print(f"速度更快的模型: {faster_model}")

        # Qualitative check: retrieve for one fixed query with both models.
        print("\n=== 固定测试问题检索效果对比 ===")
        fixed_query = "大伙房水库的管理部门是什么？"
        print(f"测试问题: {fixed_query}")

        print("\n🤖 RoBERTa模型检索结果:")
        _print_retrieval_results(comparator.roberta_retriever.find_similar_questions(fixed_query, top_k=5))

        print("\n🤖 BERT Chinese模型检索结果:")
        _print_retrieval_results(comparator.bert_chinese_retriever.find_similar_questions(fixed_query, top_k=5))

        logger.info("\n✅ BERT模型比较完成！")

    except Exception as e:
        logger.error(f"❌ 运行过程中出现错误: {str(e)}")
        raise


# Run the full comparison only when executed as a script, not on import.
if __name__ == "__main__":
    main()