# -*- coding: utf-8 -*-
"""
Embedding + Rerank 对比实验
使用bge-m3作为统一的embedding基础，对比三种BGE rerank模型的效果
"""

# 必须在导入任何HuggingFace相关库之前设置环境变量
from ...config import *  # 设置HuggingFace镜像环境变量
from ...config import (
    get_rerank_model_config, get_rerank_experiment_config,
    RERANK_EVALUATION_CONFIG, RERANK_VISUALIZATION_CONFIG,
    DEFAULT_DEVICE, MODELS_CACHE_DIR, RESULTS_CACHE_DIR
)
from ...tools.cache_manager import cache_manager

import questionretrieval

import os
import json
import time
from typing import List, Dict, Any, Tuple, Optional
from dataclasses import dataclass
import numpy as np
import pandas as pd
from sentence_transformers import SentenceTransformer
from sklearn.metrics.pairwise import cosine_similarity
import matplotlib.pyplot as plt
import seaborn as sns
import huggingface_hub
from concurrent.futures import ThreadPoolExecutor, as_completed
from loguru import logger

from .bge_reranker import BGEReranker
from .evaluation_metrics import RetrievalEvaluator


@dataclass
class EmbeddingRerankResult:
    """
    Result of one embedding + rerank run for a single (model, query) pair.
    """
    model_name: str                         # name of the rerank model tested
    query: str                              # the query text
    original_candidates: List[str]          # candidates returned by the embedding stage
    embedding_scores: List[float]           # cosine-similarity scores from the embedding stage
    rerank_scores: List[float]              # scores assigned by the rerank model
    final_ranking: List[Tuple[str, float]]  # (document, score) pairs after reranking
    embedding_time: float                   # seconds spent in the embedding stage
    rerank_time: float                      # seconds spent in the rerank stage
    total_time: float                       # embedding_time + rerank_time

class EmbeddingRerankComparison:
    """
    Embedding + rerank comparison experiment.

    bge-m3 performs the initial embedding retrieval; each configured BGE
    rerank model then re-orders the candidates, and per-model timing and
    score statistics are compared.
    """

    def __init__(self, embedding_model: str = None,
                 top_k_embedding: int = None, top_k_rerank: int = None,
                 device: Optional[str] = None, **kwargs):
        """
        Initialize the comparison experiment.

        Args:
            embedding_model: embedding model name; defaults to 'BAAI/bge-m3'.
            top_k_embedding: number of candidates kept after the embedding stage.
            top_k_rerank: number of final results kept after reranking.
            device: compute device; auto-detected when None.
            **kwargs: extra configuration, stored on ``self.config``.
        """
        import torch

        # Defaults for the top-k parameters come from the project config.
        experiment_config = get_rerank_experiment_config('embedding_rerank')

        self.embedding_model_name = embedding_model or 'BAAI/bge-m3'
        self.top_k_embedding = top_k_embedding or experiment_config['top_k_embedding']
        self.top_k_rerank = top_k_rerank or experiment_config['top_k_rerank']

        # Resolve the device: honour an explicit choice, otherwise fall back
        # to the configured default and, if that is 'auto', probe CUDA.
        if device is None:
            self.device = DEFAULT_DEVICE
            if self.device == 'auto':
                if torch.cuda.is_available():
                    self.device = 'cuda'
                else:
                    self.device = 'cpu'
                    logger.warning("CUDA不可用，使用CPU设备")
        else:
            self.device = device

        self.config = kwargs

        # Embedding model and corpus state; populated lazily by
        # load_embedding_model() / load_documents().
        self.embedding_model = None
        self.document_embeddings = None
        self.documents = []

        # Rerank models under comparison (BGE series only).
        rerank_models = ['bge-reranker-base', 'bge-reranker-large', 'bge-reranker-v2-m3']
        self.rerankers = {
            model_name: BGEReranker(model_name, device=self.device)
            for model_name in rerank_models
        }

        # Flat list of EmbeddingRerankResult collected across queries.
        self.results = []

        # BUGFIX: log the resolved name, not the raw argument (which is
        # None when the caller relies on the default).
        logger.info(f"初始化Embedding+Rerank对比实验，embedding模型: {self.embedding_model_name}")

    def load_embedding_model(self) -> None:
        """
        Load the SentenceTransformer embedding model (idempotent).

        Tries a direct snapshot download from the configured HuggingFace
        mirror first; on failure falls back to letting SentenceTransformer
        resolve the model itself with mirror environment variables set.

        Raises:
            Exception: re-raised when both download strategies fail.
        """
        if self.embedding_model is not None:
            logger.info("Embedding模型已加载")
            return

        try:
            logger.info(f"🌐 从HuggingFace国内镜像加载embedding模型: {self.embedding_model_name}")
            logger.info(f"🚀 镜像地址: {HF_MIRROR_ENDPOINT}")
            logger.info(f"📁 使用缓存目录: {cache_manager.cache_dir}")

            # huggingface_hub and os are already imported at module level;
            # the original redundant method-local imports were dropped.
            try:
                # Preferred path: download the snapshot into the local cache
                # and load the model from the resolved directory.
                model_path = huggingface_hub.snapshot_download(
                    self.embedding_model_name,
                    cache_dir=MODELS_CACHE_DIR,
                    endpoint=HF_MIRROR_ENDPOINT
                )

                self.embedding_model = SentenceTransformer(
                    model_path,
                    device=self.device,
                    trust_remote_code=True
                )

            except Exception as e:
                logger.warning(f"镜像下载失败，尝试备用方法: {e}")
                # Fallback: force the mirror via environment variables and let
                # SentenceTransformer resolve the model name itself.
                os.environ["HF_ENDPOINT"] = HF_MIRROR_ENDPOINT
                os.environ["HUGGINGFACE_HUB_URL"] = HF_MIRROR_ENDPOINT

                self.embedding_model = SentenceTransformer(
                    self.embedding_model_name,
                    device=self.device,
                    cache_folder=MODELS_CACHE_DIR,
                    trust_remote_code=True
                )

            logger.info(f"✅ Embedding模型加载成功: {self.embedding_model_name}")

        except Exception as e:
            logger.error(f"加载embedding模型失败: {e}")
            raise

    def load_documents(self, documents: List[str]) -> None:
        """
        Store the corpus and pre-compute document embeddings.

        Args:
            documents: corpus documents to index.
        """
        # BUGFIX: identity check instead of truthiness — torch modules define
        # __len__, so an (edge-case) empty model would wrongly re-trigger loading.
        if self.embedding_model is None:
            self.load_embedding_model()

        logger.info(f"正在计算 {len(documents)} 个文档的embeddings")

        start_time = time.time()
        self.documents = documents
        self.document_embeddings = self.embedding_model.encode(
            documents,
            convert_to_tensor=True,
            show_progress_bar=True
        )
        end_time = time.time()

        logger.info(f"文档embeddings计算完成，耗时: {end_time - start_time:.2f}秒")

    def _get_embedding_candidates(self, query: str) -> Tuple[List[str], List[float], float]:
        """
        Retrieve the initial candidate documents via embedding similarity.

        Args:
            query: query text.

        Returns:
            Tuple[List[str], List[float], float]:
                (candidate documents, cosine-similarity scores, elapsed seconds).
        """
        start_time = time.time()

        # Embed the query and score it against every document embedding.
        query_embedding = self.embedding_model.encode([query], convert_to_tensor=True)
        similarities = cosine_similarity(
            query_embedding.cpu().numpy(),
            self.document_embeddings.cpu().numpy()
        )[0]

        # Take the top-k candidates in descending similarity order.
        top_indices = np.argsort(similarities)[::-1][:self.top_k_embedding]
        candidates = [self.documents[i] for i in top_indices]
        scores = [float(similarities[i]) for i in top_indices]

        end_time = time.time()

        return candidates, scores, end_time - start_time

    def run_single_comparison(self, query: str) -> Dict[str, 'EmbeddingRerankResult']:
        """
        Run the comparison for a single query across all rerank models.

        Args:
            query: query text.

        Returns:
            Dict[str, EmbeddingRerankResult]: per-model results; models that
            fail are logged and omitted.

        Raises:
            ValueError: if no documents have been loaded yet.
        """
        if not self.documents:
            raise ValueError("请先加载文档")

        logger.info(f"开始对比实验，查询: {query[:50]}...")

        # 1. Embedding stage: shared candidate pool for every reranker.
        candidates, embedding_scores, embedding_time = self._get_embedding_candidates(query)

        results = {}

        # 2. Rerank stage: time each model independently on the same candidates.
        for model_name, reranker in self.rerankers.items():
            try:
                logger.info(f"测试rerank模型: {model_name}")

                # Lazy-load the reranker weights on first use.
                # NOTE(review): relies on BGEReranker's private _is_loaded flag —
                # consider exposing a public is_loaded property instead.
                if not reranker._is_loaded:
                    reranker.load_model()

                start_time = time.time()
                rerank_result = reranker.rerank(query, candidates, self.top_k_rerank)
                rerank_time = time.time() - start_time

                # rerank_result is a list of (document, score) pairs.
                rerank_scores = [score for _, score in rerank_result]

                result = EmbeddingRerankResult(
                    model_name=model_name,
                    query=query,
                    original_candidates=candidates,
                    embedding_scores=embedding_scores,
                    rerank_scores=rerank_scores,
                    final_ranking=rerank_result,
                    embedding_time=embedding_time,
                    rerank_time=rerank_time,
                    total_time=embedding_time + rerank_time
                )

                results[model_name] = result

                logger.info(f"{model_name} 完成，总耗时: {result.total_time:.3f}秒")

            except Exception as e:
                # Best-effort: a failing model must not abort the comparison.
                logger.error(f"模型 {model_name} 测试失败: {e}")
                continue

        return results

    def run_batch_comparison(self, queries: List[str]) -> List[Dict[str, 'EmbeddingRerankResult']]:
        """
        Run the comparison for a batch of queries.

        Args:
            queries: query texts.

        Returns:
            List[Dict[str, EmbeddingRerankResult]]: per-query result dicts;
            results are also accumulated on ``self.results``.
        """
        all_results = []

        for i, query in enumerate(queries):
            logger.info(f"处理查询 {i+1}/{len(queries)}")
            try:
                results = self.run_single_comparison(query)
                all_results.append(results)
                self.results.extend(results.values())
            except Exception as e:
                # Best-effort: skip failed queries, keep the rest of the batch.
                logger.error(f"查询 {i+1} 处理失败: {e}")
                continue

        return all_results

    def analyze_results(self) -> Dict[str, Any]:
        """
        Aggregate timing and score statistics per rerank model.

        Returns:
            Dict[str, Any]: report with 'model_performance',
            'speed_comparison' and 'score_distribution' sections. All
            numeric values are plain Python floats so the report is
            JSON-serializable; returns an error dict when there are no
            results.
        """
        if not self.results:
            return {"error": "没有可分析的结果"}

        # Group results by rerank model.
        model_results = {}
        for result in self.results:
            model_results.setdefault(result.model_name, []).append(result)

        analysis = {
            'model_performance': {},
            'speed_comparison': {},
            'score_distribution': {}
        }

        for model_name, results in model_results.items():
            embedding_times = [r.embedding_time for r in results]
            rerank_times = [r.rerank_time for r in results]
            total_times = [r.total_time for r in results]

            # BUGFIX: cast numpy scalars to float — json.dump() in
            # save_results() raises TypeError on np.float64 values.
            analysis['model_performance'][model_name] = {
                'total_queries': len(results),
                'avg_embedding_time': float(np.mean(embedding_times)),
                'avg_rerank_time': float(np.mean(rerank_times)),
                'avg_total_time': float(np.mean(total_times)),
                'std_total_time': float(np.std(total_times))
            }

            all_rerank_scores = []
            for r in results:
                all_rerank_scores.extend(r.rerank_scores)

            # BUGFIX: np.min/np.max raise ValueError on an empty list; emit an
            # empty distribution instead of crashing the whole analysis.
            if all_rerank_scores:
                analysis['score_distribution'][model_name] = {
                    'mean_score': float(np.mean(all_rerank_scores)),
                    'std_score': float(np.std(all_rerank_scores)),
                    'min_score': float(np.min(all_rerank_scores)),
                    'max_score': float(np.max(all_rerank_scores))
                }
            else:
                analysis['score_distribution'][model_name] = {}

        return analysis

    def save_results(self, output_dir: str) -> None:
        """
        Persist detailed results and the aggregated analysis as JSON.

        Args:
            output_dir: directory to write the JSON files into (created if
                missing).
        """
        os.makedirs(output_dir, exist_ok=True)

        # Detailed per-run records (top-5 of each ranking only).
        results_data = []
        for result in self.results:
            results_data.append({
                'model_name': result.model_name,
                'query': result.query,
                'embedding_time': result.embedding_time,
                'rerank_time': result.rerank_time,
                'total_time': result.total_time,
                # Cast scores to float in case the reranker returned numpy
                # scalars, which json.dump cannot serialize.
                'final_ranking': [(doc, float(score)) for doc, score in result.final_ranking[:5]]
            })

        with open(os.path.join(output_dir, 'embedding_rerank_results.json'), 'w', encoding='utf-8') as f:
            json.dump(results_data, f, ensure_ascii=False, indent=2)

        # Aggregated analysis report.
        analysis = self.analyze_results()
        with open(os.path.join(output_dir, 'embedding_rerank_analysis.json'), 'w', encoding='utf-8') as f:
            json.dump(analysis, f, ensure_ascii=False, indent=2)

        logger.info(f"实验结果已保存到: {output_dir}")

    def visualize_results(self, output_dir: str) -> None:
        """
        Render comparison plots (timings and scores) to a PNG file.

        Args:
            output_dir: directory for the output image (created if missing).
        """
        if not self.results:
            logger.warning("没有结果可以可视化")
            return

        os.makedirs(output_dir, exist_ok=True)

        # Flatten results into a DataFrame for seaborn.
        df_data = []
        for result in self.results:
            df_data.append({
                'Model': result.model_name,
                'Embedding Time': result.embedding_time,
                'Rerank Time': result.rerank_time,
                'Total Time': result.total_time,
                'Avg Rerank Score': np.mean(result.rerank_scores) if result.rerank_scores else 0
            })

        df = pd.DataFrame(df_data)

        # Configure fonts so CJK plot titles render correctly.
        plt.rcParams['font.sans-serif'] = ['SimHei', 'Arial Unicode MS', 'DejaVu Sans']
        plt.rcParams['axes.unicode_minus'] = False

        fig, axes = plt.subplots(2, 2, figsize=(15, 12))

        # Total processing time per model.
        sns.boxplot(data=df, x='Model', y='Total Time', ax=axes[0, 0])
        axes[0, 0].set_title('总处理时间对比')
        axes[0, 0].tick_params(axis='x', rotation=45)

        # Rerank-stage time per model.
        sns.boxplot(data=df, x='Model', y='Rerank Time', ax=axes[0, 1])
        axes[0, 1].set_title('Rerank处理时间对比')
        axes[0, 1].tick_params(axis='x', rotation=45)

        # Average rerank score per model.
        sns.boxplot(data=df, x='Model', y='Avg Rerank Score', ax=axes[1, 0])
        axes[1, 0].set_title('平均Rerank分数对比')
        axes[1, 0].tick_params(axis='x', rotation=45)

        # Time vs. score trade-off scatter.
        sns.scatterplot(data=df, x='Total Time', y='Avg Rerank Score', hue='Model', ax=axes[1, 1])
        axes[1, 1].set_title('处理时间 vs 平均分数')

        plt.tight_layout()
        plt.savefig(os.path.join(output_dir, 'embedding_rerank_comparison.png'), dpi=300, bbox_inches='tight')
        plt.close()

        logger.info(f"可视化结果已保存到: {output_dir}")