# -*- coding: utf-8 -*-
"""
Sentence Transformer相似度检索器
基于Sentence Transformers模型计算问题间的语义相似度进行检索

作者: [您的姓名]
日期: 2024
"""

import os
import time
from typing import List, Tuple, Dict, Optional
import numpy as np
from loguru import logger
from sentence_transformers import SentenceTransformer
from sklearn.metrics.pairwise import cosine_similarity, euclidean_distances, manhattan_distances, linear_kernel
from ...config import *  # 导入配置，包括镜像设置
from ...tools.cache_manager import cache_manager

# Use the global cache manager's directory for locally saved model files
model_cache_dir = cache_manager.cache_dir


class SentenceTransformerRetriever:
    """
    Question retriever based on Sentence Transformers.

    Uses the sentence-transformers library to compute semantic similarity
    between a query question and a pool of candidate questions. Multiple
    similarity measures are supported; each is mapped to (roughly) the
    [0, 1] range so the per-method default thresholds stay comparable.

    Features:
    - Multiple similarity measures (cosine, Pearson correlation, Euclidean,
      Manhattan, dot product, linear kernel)
    - Multilingual model support
    - Automatic per-method similarity thresholds
    - Combined scoring (similarity blended with candidate quality scores)
    """

    def __init__(self, model_name: str = 'paraphrase-multilingual-MiniLM-L12-v2',
                 similarity_method: str = 'cosine'):
        """
        Initialize the retriever.

        Args:
            model_name: sentence-transformers model name
            similarity_method: similarity measure
                ('cosine', 'pearson', 'euclidean', 'manhattan', 'dot', 'linear')
                cosine: range [-1, 1]; positive values usually mean similar
                pearson: Pearson correlation in [-1, 1], rescaled to [0, 1]
                euclidean: distance mapped to (0, 1]; values tend to be small
                manhattan: distance mapped to (0, 1]; values tend to be small
                dot: raw dot product (unnormalized cosine), min-max scaled to [0, 1]
                linear: linear kernel, min-max scaled to [0, 1]
        """
        self.model_name = model_name
        self.similarity_method = similarity_method

        try:
            # Prefer a previously saved local copy so repeated runs stay offline.
            cached_model_path = os.path.join(model_cache_dir, model_name.replace('/', '_'))
            if os.path.exists(cached_model_path):
                logger.info(f"🔄 从本地缓存加载模型: {model_name}")
                self.model = SentenceTransformer(cached_model_path)
            else:
                logger.info(f"📥 首次下载模型: {model_name}，将缓存到 {model_cache_dir}")
                logger.info(f"🚀 使用HuggingFace国内镜像: {HF_MIRROR_ENDPOINT}")
                self.model = SentenceTransformer(model_name, cache_folder=model_cache_dir)
                # Save a local copy for the cache check above.
                self.model.save(cached_model_path)
            logger.info(f"✅ 成功加载模型: {model_name}")
        except Exception as e:
            logger.warning(f"模型加载失败: {e}")
            logger.info("尝试使用备用模型...")
            # Fallback: a smaller, widely available model.
            backup_model = 'all-MiniLM-L6-v2'
            cached_backup_path = os.path.join(model_cache_dir, backup_model.replace('/', '_'))
            if os.path.exists(cached_backup_path):
                self.model = SentenceTransformer(cached_backup_path)
            else:
                self.model = SentenceTransformer(backup_model, cache_folder=model_cache_dir)
                self.model.save(cached_backup_path)

        # Candidate pool state; populated by load_candidates().
        # (NOTE: the original also re-assigned self.similarity_method here,
        # duplicating the assignment at the top of __init__; removed.)
        self.candidate_questions = []
        self.candidate_scores = []
        self.candidate_embeddings = None

        # Default thresholds per similarity method (every method is mapped
        # into the [0, 1] range).
        self.default_thresholds = {
            'cosine': {'min': 0.2, 'max': 0.95},
            'pearson': {'min': 0.6, 'max': 0.975},  # Pearson correlation thresholds
            'euclidean': {'min': 0.01, 'max': 0.8},
            'manhattan': {'min': 0.01, 'max': 0.8},
            'dot': {'min': 0.1, 'max': 0.9},  # dot-product thresholds
            'linear': {'min': 0.01, 'max': 0.9}  # thresholds after min-max scaling
        }

    @staticmethod
    def _minmax_scale(scores: np.ndarray) -> np.ndarray:
        """
        Min-max scale a score array into [0, 1].

        When every score is identical there is no ordering information, so a
        neutral constant 0.5 is returned for each entry.
        """
        min_score = np.min(scores)
        max_score = np.max(scores)
        if max_score > min_score:
            return (scores - min_score) / (max_score - min_score)
        return np.full_like(scores, 0.5)

    def _calculate_similarity(self, query_embedding: np.ndarray, candidate_embeddings: np.ndarray) -> np.ndarray:
        """
        Unified similarity computation, dispatching on self.similarity_method.

        Args:
            query_embedding: embedding of the query question, shape (1, dim)
            candidate_embeddings: embeddings of candidate questions, shape (n, dim)

        Returns:
            Array of n similarity values.

        Raises:
            ValueError: if self.similarity_method is not supported.
        """
        if self.similarity_method == 'cosine':
            return cosine_similarity(query_embedding, candidate_embeddings)[0]

        elif self.similarity_method == 'pearson':
            # Pearson correlation, computed per candidate.
            from scipy.stats import pearsonr
            similarities = []
            for candidate_emb in candidate_embeddings:
                corr, _ = pearsonr(query_embedding[0], candidate_emb)
                # pearsonr yields NaN for zero-variance input; treat as no correlation.
                if np.isnan(corr):
                    corr = 0.0
                similarities.append(corr)
            # Map [-1, 1] to [0, 1].
            return (np.array(similarities) + 1) / 2

        elif self.similarity_method == 'euclidean':
            # Convert Euclidean distance to a similarity in (0, 1].
            distances = euclidean_distances(query_embedding, candidate_embeddings)[0]
            return 1 / (1 + distances)

        elif self.similarity_method == 'manhattan':
            # Convert Manhattan distance to a similarity in (0, 1].
            distances = manhattan_distances(query_embedding, candidate_embeddings)[0]
            return 1 / (1 + distances)

        elif self.similarity_method == 'dot':
            # Dot-product similarity (unnormalized cosine), min-max scaled.
            dot_scores = np.dot(query_embedding, candidate_embeddings.T)[0]
            return self._minmax_scale(dot_scores)

        elif self.similarity_method == 'linear':
            # Linear-kernel similarity, min-max scaled to [0, 1].
            linear_scores = linear_kernel(query_embedding, candidate_embeddings)[0]
            return self._minmax_scale(linear_scores)

        else:
            raise ValueError(f"不支持的相似度方法: {self.similarity_method}")

    def load_candidates(self, questions: List[str], scores: List[float] = None):
        """
        Load the candidate question pool and precompute its embeddings.

        Args:
            questions: candidate questions
            scores: optional quality score per question; defaults to 1.0 each

        Raises:
            ValueError: if a non-empty scores list does not match questions in length.
        """
        # A silently mismatched scores list would cause an IndexError later
        # in recommend(); fail fast instead. An empty/None scores list keeps
        # the original behavior of defaulting every score to 1.0.
        if scores and len(scores) != len(questions):
            raise ValueError(
                f"scores长度({len(scores)})与questions长度({len(questions)})不一致")

        self.candidate_questions = questions
        self.candidate_scores = scores or [1.0] * len(questions)

        # Precompute candidate embeddings once so recommend() only encodes the query.
        logger.info("正在计算候选问题的嵌入向量...")
        self.candidate_embeddings = self.model.encode(questions)
        logger.info(f"✅ 完成 {len(questions)} 个问题的嵌入向量计算")

    def recommend(self, query_question: str, top_k: int = 5,
                  similarity_threshold: float = None,
                  similarity_weight: float = 0.7,
                  max_similarity_threshold: float = None) -> List[Tuple[str, float, float]]:
        """
        Recommend questions similar to the query.

        Args:
            query_question: query question
            top_k: number of recommendations to return
            similarity_threshold: minimum similarity (method default when None)
            similarity_weight: weight of similarity in the combined score
            max_similarity_threshold: maximum similarity (method default when None)

        Returns:
            List of (question, combined_score, similarity) tuples sorted by
            combined score, at most top_k entries.

        Raises:
            ValueError: if no candidates have been loaded.
        """
        if not self.candidate_questions:
            raise ValueError("请先使用load_candidates()加载候选问题")

        # Fall back to the per-method default thresholds.
        if similarity_threshold is None:
            similarity_threshold = self.default_thresholds[self.similarity_method]['min']

        if max_similarity_threshold is None:
            max_similarity_threshold = self.default_thresholds[self.similarity_method]['max']

        # Encode the query and score it against the precomputed candidates.
        query_embedding = self.model.encode([query_question])
        similarities = self._calculate_similarity(query_embedding, self.candidate_embeddings)

        # Keep candidates strictly inside the (min, max) band: too low means
        # unrelated, too high usually means a near-duplicate of the query.
        valid_indices = [i for i, sim in enumerate(similarities)
                         if similarity_threshold < sim < max_similarity_threshold]

        if not valid_indices:
            logger.warning(f"没有找到相似度在 [{similarity_threshold}, {max_similarity_threshold}] 范围内的问题")
            return []

        # Combined score: similarity blended with the normalized candidate score.
        max_score = max(self.candidate_scores) if self.candidate_scores else 1.0
        if max_score <= 0:
            # Guard against division by zero (and sign flips) when every
            # candidate score is non-positive.
            max_score = 1.0

        recommendations = []
        for i in valid_indices:
            normalized_score = self.candidate_scores[i] / max_score
            final_score = (similarities[i] * similarity_weight
                           + normalized_score * (1 - similarity_weight))
            recommendations.append((self.candidate_questions[i], final_score, similarities[i]))

        # Highest combined score first; return at most top_k entries.
        recommendations.sort(key=lambda x: x[1], reverse=True)
        return recommendations[:top_k]

    def batch_recommend(self, queries: List[str], top_k: int = 5) -> Dict[str, List[Tuple[str, float, float]]]:
        """
        Recommend for several queries at once.

        Args:
            queries: query questions
            top_k: number of recommendations per query

        Returns:
            Mapping from query question to its recommendation list.
        """
        return {query: self.recommend(query, top_k) for query in queries}

    def set_similarity_method(self, method: str):
        """
        Switch the similarity measure.

        Args:
            method: one of 'cosine', 'pearson', 'euclidean', 'manhattan',
                'dot', 'linear'

        Raises:
            ValueError: if method is not supported.
        """
        supported_methods = ['cosine', 'pearson', 'euclidean', 'manhattan', 'dot', 'linear']
        if method not in supported_methods:
            raise ValueError(f"不支持的相似度方法: {method}，支持的方法: {supported_methods}")
        self.similarity_method = method
        logger.info(f"相似度计算方法已设置为: {method}")

    def get_similarity_matrix(self) -> np.ndarray:
        """
        Pairwise similarity matrix over the candidate pool.

        Returns:
            (n, n) similarity matrix. NOTE: for methods that apply per-query
            min-max scaling ('dot', 'linear') each row is scaled independently,
            so the matrix is not guaranteed to be symmetric.

        Raises:
            ValueError: if no candidates have been loaded.
        """
        if self.candidate_embeddings is None:
            raise ValueError("请先加载候选问题")

        if self.similarity_method == 'cosine':
            return cosine_similarity(self.candidate_embeddings)

        # Other methods: build the matrix one row at a time through the
        # unified similarity interface.
        n = len(self.candidate_embeddings)
        similarity_matrix = np.zeros((n, n))
        for i in range(n):
            similarity_matrix[i] = self._calculate_similarity(
                self.candidate_embeddings[i:i + 1],
                self.candidate_embeddings
            )
        return similarity_matrix

    def save_embeddings(self, filepath: str):
        """
        Persist the precomputed candidate embeddings with np.save.

        Args:
            filepath: destination path
        """
        if self.candidate_embeddings is not None:
            np.save(filepath, self.candidate_embeddings)
            logger.info(f"嵌入向量已保存到: {filepath}")
        else:
            logger.error("没有可保存的嵌入向量")

    def load_embeddings(self, filepath: str):
        """
        Load candidate embeddings previously saved by save_embeddings().

        Args:
            filepath: path to the .npy file
        """
        try:
            self.candidate_embeddings = np.load(filepath)
            logger.info(f"嵌入向量已从 {filepath} 加载")
        except Exception as e:
            logger.error(f"加载嵌入向量失败: {e}")

    def evaluate_method_performance(self, test_queries: List[str],
                                    ground_truth: List[List[str]]) -> Dict[str, float]:
        """
        Compare precision@5 of every supported similarity method.

        Args:
            test_queries: test queries
            ground_truth: for each query, the list of correct answers

        Returns:
            Mapping from method name to mean precision over the test queries
            (0.0 when test_queries is empty).
        """
        methods = ['cosine', 'pearson', 'euclidean', 'manhattan', 'dot', 'linear']
        results = {}

        original_method = self.similarity_method
        try:
            for method in methods:
                self.set_similarity_method(method)
                precision_scores = []

                for i, query in enumerate(test_queries):
                    recommendations = self.recommend(query, top_k=5)
                    recommended_questions = [rec[0] for rec in recommendations]

                    # Precision: fraction of recommendations present in the truth set.
                    correct = sum(1 for q in recommended_questions if q in ground_truth[i])
                    precision = correct / len(recommended_questions) if recommended_questions else 0
                    precision_scores.append(precision)

                # Avoid np.mean([]) -> NaN; also return a plain float.
                results[method] = float(np.mean(precision_scores)) if precision_scores else 0.0
        finally:
            # Always restore the caller's similarity method, even if
            # recommend() raised for one of the methods.
            self.set_similarity_method(original_method)

        return results