# -*- coding: utf-8 -*-
"""
层级聚类检索器
基于层级聚类算法的问题检索系统

作者: [您的姓名]
日期: 2024
"""

import os
from loguru import logger
from typing import List, Tuple, Dict, Optional, Union
import numpy as np
from sentence_transformers import SentenceTransformer
from sklearn.metrics.pairwise import cosine_similarity
from scipy.cluster.hierarchy import linkage, fcluster, dendrogram
import pickle
import matplotlib.pyplot as plt
# 设置中文字体支持
plt.rcParams['font.sans-serif'] = ['SimHei', 'Microsoft YaHei', 'DejaVu Sans']
plt.rcParams['axes.unicode_minus'] = False
from ...config import *  # 导入配置，包括镜像设置
from ...tools.cache_manager import cache_manager

# 使用全局缓存管理器
model_cache_dir = cache_manager.cache_dir


class HierarchicalClusteringRetriever:
    """
    多层级聚类检索器
    
    该检索器使用层级聚类算法构建多层级的问题组织结构，
    支持不同粒度的问题检索和聚类分析。
    
    特点：
    - 多层级聚类结构
    - 异常值检测
    - 多模型融合
    - 丰富的可视化功能
    - 参数自动调优
    """

    def __init__(self,
                 model_names: Union[str, List[str]] = 'paraphrase-multilingual-MiniLM-L12-v2',
                 levels: Optional[List[float]] = None,
                 level_weights: Optional[List[float]] = None,
                 outlier_detection: bool = True,
                 outlier_threshold: float = 0.1,
                 model_fusion_method: str = 'average',
                 linkage_method: str = 'average'):
        """
        Initialize the multi-level hierarchical clustering retriever.

        Args:
            model_names: Pretrained model name, or a list of names for
                multi-model fusion.
            levels: Distance thresholds per clustering level, from coarse to
                fine. Defaults to [0.9, 0.7, 0.5, 0.3]. (None is used as the
                signature default instead of a list literal to avoid the
                shared mutable-default-argument pitfall.)
            level_weights: Weight applied to each level's cluster bonus.
                Defaults to [0.3, 0.25, 0.2, 0.15].
            outlier_detection: Whether to flag outlier questions when
                candidates are loaded.
            outlier_threshold: Fraction (percentile cut) used by outlier
                detection; see _detect_outliers.
            model_fusion_method: Multi-model fusion strategy:
                'average', 'weighted', or 'concat'.
            linkage_method: Hierarchical-clustering linkage method:
                'ward', 'complete', 'average', or 'single'.

        Raises:
            ValueError: If levels and level_weights differ in length.
        """
        # Resolve and validate clustering configuration first, so a bad
        # configuration fails fast before any model download happens.
        # Fresh default lists are built per instance: mutating one
        # retriever's levels can never affect another instance.
        resolved_levels = [0.9, 0.7, 0.5, 0.3] if levels is None else levels
        resolved_weights = [0.3, 0.25, 0.2, 0.15] if level_weights is None else level_weights
        if len(resolved_levels) != len(resolved_weights):
            raise ValueError(
                f"levels ({len(resolved_levels)}) and level_weights "
                f"({len(resolved_weights)}) must have the same length")

        # Model configuration: normalize to a list and remember a primary name.
        if isinstance(model_names, str):
            self.model_name = model_names
            self.model_names = [model_names]
        else:
            self.model_name = model_names[0] if model_names else 'paraphrase-multilingual-MiniLM-L12-v2'
            self.model_names = model_names

        self.models = []
        self._load_models()

        # Clustering configuration.
        self.levels = resolved_levels
        self.level_weights = resolved_weights
        self.linkage_method = linkage_method

        # Feature switches.
        self.outlier_detection = outlier_detection
        self.outlier_threshold = outlier_threshold
        self.model_fusion_method = model_fusion_method

        # Candidate data (filled by load_candidates()).
        self.candidate_questions = []
        self.candidate_scores = []
        self.candidate_embeddings = None
        self.outlier_mask = None

        # Clustering results (filled by _perform_hierarchical_clustering()).
        self.linkage_matrix = None
        self.level_clusters = {}  # {level_idx: {clusters: {cluster_id: info}, n_clusters: int}}
        self.level_labels = {}    # {level_idx: labels_array}

        # Parameter-tuning history.
        self.tuning_history = []

        logger.info(f"✅ 初始化多层级聚类推荐器")
        logger.info(f"   - 模型: {self.model_names}")
        logger.info(f"   - 层级: {self.levels}")
        logger.info(f"   - 连接方法: {self.linkage_method}")
        logger.info(f"   - 异常值检测: {self.outlier_detection}")

    def _load_models(self):
        """
        Load every sentence-transformer model in self.model_names (with caching).

        Each model is cached under ``model_cache_dir`` (path derived from the
        model name with '/' replaced by '_'). On any load failure the model is
        replaced by the backup 'all-MiniLM-L6-v2', so self.models always ends
        up with one entry per requested model name.
        """
        for model_name in self.model_names:
            try:
                # Reuse the local cached copy when this model was downloaded before.
                cached_model_path = os.path.join(model_cache_dir, model_name.replace('/', '_'))
                if os.path.exists(cached_model_path):
                    logger.info(f"🔄 从本地缓存加载模型: {model_name}")
                    model = SentenceTransformer(cached_model_path)
                else:
                    logger.info(f"📥 首次下载模型: {model_name}，将缓存到 {model_cache_dir}")
                    model = SentenceTransformer(model_name, cache_folder=model_cache_dir)
                    # Persist the freshly downloaded model to the local cache.
                    model.save(cached_model_path)
                self.models.append(model)
                logger.info(f"✅ 成功加载模型: {model_name}")
            except Exception as e:
                logger.warning(f"模型 {model_name} 加载失败: {e}")
                # Fall back to a small general-purpose backup model (also cached).
                backup_model = 'all-MiniLM-L6-v2'
                cached_backup_path = os.path.join(model_cache_dir, backup_model.replace('/', '_'))
                if os.path.exists(cached_backup_path):
                    model = SentenceTransformer(cached_backup_path)
                else:
                    model = SentenceTransformer(backup_model, cache_folder=model_cache_dir)
                    model.save(cached_backup_path)
                self.models.append(model)
                logger.info("使用备用模型: all-MiniLM-L6-v2")

    def load_candidates(self, questions: List[str], scores: Optional[List[float]] = None):
        """
        Load candidate questions and build the multi-level clustering over them.

        Args:
            questions: Candidate question texts (must be non-empty).
            scores: Optional per-question scores; when omitted (or empty)
                every question gets a score of 1.0.

        Raises:
            ValueError: If questions is empty, or scores is provided with a
                length different from questions.
        """
        # Fail fast on inputs that would only crash later inside scipy/sklearn
        # with an obscure error.
        if not questions:
            raise ValueError("questions must not be empty")
        if scores and len(scores) != len(questions):
            raise ValueError(
                f"scores length ({len(scores)}) does not match "
                f"questions length ({len(questions)})")

        self.candidate_questions = questions
        # Preserve the original fallback: None or empty scores -> all 1.0.
        self.candidate_scores = scores or [1.0] * len(questions)

        logger.info(f"开始处理 {len(questions)} 个候选问题...")

        # Fused multi-model embeddings for all candidates.
        self.candidate_embeddings = self._generate_fused_embeddings(questions)

        # Optionally flag outliers (used later to filter recommendations).
        if self.outlier_detection:
            self._detect_outliers()

        # Build the hierarchical clustering at every configured level.
        self._perform_hierarchical_clustering()

        logger.info("✅ 候选问题加载和预处理完成")

    def _generate_fused_embeddings(self, texts: List[str]) -> np.ndarray:
        """
        Encode texts with every loaded model and fuse the per-model embeddings.

        Args:
            texts: Input strings to embed.

        Returns:
            Fused embedding matrix, one row per input text. 'average' and
            'weighted' keep the per-model dimensionality; 'concat' stacks
            each model's dimensions side by side.

        Raises:
            ValueError: If self.model_fusion_method is not one of
                'average', 'weighted', or 'concat'.
        """
        logger.info("正在生成多模型融合嵌入向量...")

        per_model_embeddings = []
        for i, model in enumerate(self.models):
            logger.info(f"  模型 {i+1}/{len(self.models)}: {self.model_names[i]}")
            per_model_embeddings.append(model.encode(texts, show_progress_bar=True))

        method = self.model_fusion_method
        if method == 'average':
            # Plain element-wise mean across models.
            fused_embeddings = np.mean(per_model_embeddings, axis=0)
        elif method == 'weighted':
            # Weighted mean; equal weights for now, tunable per model quality.
            n_models = len(per_model_embeddings)
            fused_embeddings = np.average(per_model_embeddings, axis=0,
                                          weights=[1.0 / n_models] * n_models)
        elif method == 'concat':
            # Concatenate each model's vector along the feature axis.
            fused_embeddings = np.concatenate(per_model_embeddings, axis=1)
        else:
            raise ValueError(f"不支持的融合方法: {self.model_fusion_method}")

        logger.info(f"✅ 融合嵌入向量生成完成，维度: {fused_embeddings.shape}")
        return fused_embeddings

    def _detect_outliers(self):
        """
        Flag candidate questions that are dissimilar to the rest of the pool.

        Computes each question's mean cosine similarity to all *other*
        questions and marks the bottom ``outlier_threshold`` fraction
        (a percentile cut) as outliers in ``self.outlier_mask``.
        """
        logger.info("开始异常值检测...")

        n = len(self.candidate_questions)
        if n < 2:
            # With fewer than two questions there is no "other" question to
            # compare against (the original mean over an empty slice would be
            # NaN); nothing can be an outlier.
            self.outlier_mask = np.zeros(n, dtype=bool)
            logger.info("✅ 异常值检测完成，发现 0 个异常问题 (候选数不足)")
            return

        # Pairwise cosine similarity between all candidate embeddings.
        similarity_matrix = cosine_similarity(self.candidate_embeddings)

        # Mean similarity to every other question, excluding self:
        # vectorized as (row sum - diagonal) / (n - 1) instead of an O(n^2)
        # Python loop over concatenated slices.
        avg_similarities = (similarity_matrix.sum(axis=1) - np.diag(similarity_matrix)) / (n - 1)

        # outlier_threshold is a fraction -> percentile cut on the averages.
        threshold = np.percentile(avg_similarities, self.outlier_threshold * 100)
        self.outlier_mask = avg_similarities < threshold
        n_outliers = np.sum(self.outlier_mask)

        logger.info(f"✅ 异常值检测完成，发现 {n_outliers} 个异常问题 (阈值: {threshold:.3f})")

        if n_outliers > 0:
            logger.info("异常问题示例:")
            outlier_indices = np.where(self.outlier_mask)[0]
            for i, idx in enumerate(outlier_indices[:3]):
                logger.info(f"  {i+1}. {self.candidate_questions[idx]} (相似度: {avg_similarities[idx]:.3f})")

    def _perform_hierarchical_clustering(self):
        """
        Build the linkage matrix and cut it at every configured level.

        Fills:
            self.linkage_matrix: scipy linkage matrix over all candidates.
            self.level_labels: {level_idx: flat cluster label array}.
            self.level_clusters: {level_idx: {'clusters': {cluster_id: info},
                'n_clusters': int, 'threshold': float}} where each cluster's
                info carries its size, questions, average score, and indices.
        """
        logger.info(f"开始多层级聚类 (方法: {self.linkage_method})...")
        
        # All embeddings take part in clustering; outliers are NOT removed
        # here — they are only filtered later at recommendation time.
        valid_embeddings = self.candidate_embeddings
        
        # Run hierarchical clustering.
        if self.linkage_method == 'ward':
            # Ward linkage requires Euclidean distances on raw observations.
            self.linkage_matrix = linkage(valid_embeddings, method='ward')
        else:
            # Other linkage methods work on a precomputed cosine distance matrix.
            from scipy.spatial.distance import pdist
            distances = pdist(valid_embeddings, metric='cosine')
            self.linkage_matrix = linkage(distances, method=self.linkage_method)
        
        # Cut the tree at each distance threshold: one set of flat clusters
        # per configured level, from coarse to fine.
        for i, threshold in enumerate(self.levels):
            labels = fcluster(self.linkage_matrix, threshold, criterion='distance')
            self.level_labels[i] = labels
            
            # Collect per-cluster details for this level.
            clusters = {}
            unique_labels = np.unique(labels)
            
            for cluster_id in unique_labels:
                cluster_mask = labels == cluster_id
                cluster_indices = np.where(cluster_mask)[0]
                
                cluster_questions = [self.candidate_questions[idx] for idx in cluster_indices]
                cluster_scores = [self.candidate_scores[idx] for idx in cluster_indices]
                
                clusters[int(cluster_id)] = {
                    'size': len(cluster_indices),
                    'questions': cluster_questions,  # keep every question, not a sample
                    'avg_score': np.mean(cluster_scores),
                    'indices': cluster_indices.tolist()
                }
            
            self.level_clusters[i] = {
                'clusters': clusters,
                'n_clusters': len(unique_labels),
                'threshold': threshold
            }
            
            n_clusters = len(unique_labels)
            logger.info(f"  层级 {i} (阈值: {threshold}): {n_clusters} 个簇")
        
        logger.info("✅ 多层级聚类完成")

    def recommend(self, query_question: str, top_k: int = 5,
                  similarity_threshold: float = 0.2,
                  base_weight: float = 0.6,
                  return_detailed_scores: bool = False,
                  filter_outliers: bool = True) -> List[Tuple]:
        """
        Recommend candidate questions similar to the query.

        The final score of a candidate is
        ``base_weight * (0.7 * similarity + 0.3 * normalized_score) + bonus``,
        where ``bonus`` is the sum of the per-level cluster scores from
        _calculate_multilevel_scores.

        Args:
            query_question: Query question text.
            top_k: Number of recommendations to return.
            similarity_threshold: Candidates below this cosine similarity to
                the query are dropped.
            base_weight: Weight of the base score in the final score.
            return_detailed_scores: When True each result tuple also carries
                per-level scores and outlier info.
            filter_outliers: Drop candidates flagged by outlier detection.

        Returns:
            Up to top_k tuples sorted by final score, descending:
            (question, final_score, base_score, multilevel_bonus, similarity)
            plus (level_details, outlier_info) when return_detailed_scores.

        Raises:
            ValueError: If no candidates have been loaded.
        """
        if not self.candidate_questions:
            raise ValueError("请先使用load_candidates()加载候选问题")

        # Embed the query through the same fused-model pipeline as candidates.
        query_embedding = self._generate_fused_embeddings([query_question])[0]

        # Cosine similarity between the query and every candidate.
        similarities = cosine_similarity([query_embedding], self.candidate_embeddings)[0]

        # Per-level cluster bonus for every candidate.
        level_scores = self._calculate_multilevel_scores(similarities)

        recommendations = []
        # Normalizer for raw scores; guard against division by zero when
        # every candidate score is 0.
        max_score = max(self.candidate_scores) if self.candidate_scores else 1.0
        if max_score == 0:
            max_score = 1.0

        for i, similarity in enumerate(similarities):
            # Drop candidates too dissimilar to the query.
            if similarity < similarity_threshold:
                continue

            # Skip candidates flagged as outliers, if requested.
            if filter_outliers and self.outlier_mask is not None and self.outlier_mask[i]:
                continue

            question = self.candidate_questions[i]

            # Never recommend the query itself.
            if question.strip() == query_question.strip():
                continue

            score = self.candidate_scores[i]

            # Base score: similarity plus normalized raw score.
            base_score = similarity * 0.7 + (score / max_score) * 0.3

            # Cluster bonus summed across all clustering levels.
            multilevel_bonus = sum(level_scores[level][i] for level in range(len(self.levels)))

            final_score = base_weight * base_score + multilevel_bonus

            if return_detailed_scores:
                level_details = {f"level_{j + 1}": level_scores[j][i] for j in range(len(self.levels))}
                outlier_info = {'is_outlier': self.outlier_mask[i] if self.outlier_mask is not None else False}
                recommendations.append(
                    (question, final_score, base_score, multilevel_bonus, similarity, level_details, outlier_info))
            else:
                recommendations.append((question, final_score, base_score, multilevel_bonus, similarity))

        # Sort by final score and keep the best top_k.
        recommendations.sort(key=lambda x: x[1], reverse=True)
        return recommendations[:top_k]

    def _calculate_multilevel_scores(self, query_similarities: np.ndarray) -> Dict[int, np.ndarray]:
        """
        Compute the per-level cluster bonus for every candidate question.

        For each clustering level, every cluster with more than one member
        receives ``level_weight * mean(member-query similarity) *
        mean(pairwise similarity inside the cluster)``; that value is
        assigned to all of the cluster's members. Singleton clusters stay 0.

        Args:
            query_similarities: Cosine similarities between the query and
                every candidate question.

        Returns:
            Mapping of level index to a per-candidate score array.
        """
        n_candidates = len(self.candidate_questions)
        level_scores: Dict[int, np.ndarray] = {}

        for level_idx in range(len(self.levels)):
            labels = self.level_labels[level_idx]
            weight = self.level_weights[level_idx]
            bonus = np.zeros(n_candidates)

            for cluster_id in np.unique(labels):
                members = np.where(labels == cluster_id)[0]

                # A singleton cluster carries no cohesion information.
                if members.size < 2:
                    continue

                # How close the query is to this cluster, on average.
                query_affinity = query_similarities[members].mean()

                # How tight the cluster itself is (mean pairwise similarity).
                cohesion = cosine_similarity(self.candidate_embeddings[members]).mean()

                # Cluster score = query affinity * cohesion * level weight,
                # shared by every member of the cluster.
                bonus[members] = weight * query_affinity * cohesion

            level_scores[level_idx] = bonus

        return level_scores

    def get_cluster_info(self, level: int = None) -> Dict:
        """
        Return clustering information.

        Args:
            level: Level index. When given, only that level's raw data is
                returned (backward-compatible form); when None, a summary of
                every level is returned.

        Returns:
            Clustering information dict.

        Raises:
            ValueError: If the requested level does not exist.
        """
        if level is not None:
            # Backward-compatible single-level form.
            if level not in self.level_clusters:
                raise ValueError(f"层级 {level} 不存在")
            return self.level_clusters[level]

        # Full summary across all levels.
        summary = {
            "levels": len(self.levels),
            "thresholds": self.levels,
            "weights": self.level_weights,
            "linkage_method": self.linkage_method,
            "outlier_detection": self.outlier_detection,
            "level_details": {},
        }

        details = summary["level_details"]
        for idx in range(len(self.levels)):
            data = self.level_clusters.get(idx)
            if data is None:
                # Level configured but not (yet) clustered — skip it.
                continue
            details[f"level_{idx + 1}"] = {
                "threshold": self.levels[idx],
                "n_clusters": data.get('n_clusters', 0),
                "clusters": data.get('clusters', {}),
            }

        return summary

    def plot_dendrogram(self, max_d: Optional[float] = None, 
                       figsize: Tuple[int, int] = (15, 8),
                       save_path: Optional[str] = None, 
                       show_labels: bool = False):
        """
        Plot the hierarchical clustering dendrogram.

        Args:
            max_d: Optional distance at which to draw an extra threshold line.
            figsize: Figure size.
            save_path: When given, the figure is also saved to this path.
            show_labels: Show (truncated) question texts as leaf labels; only
                honored for up to 50 candidates to keep the plot readable.

        Raises:
            ValueError: If clustering has not been performed yet.
        """
        if self.linkage_matrix is None:
            raise ValueError("Please perform clustering first")
        
        plt.figure(figsize=figsize)
        
        # Leaf labels: truncate long questions; skip entirely for large sets.
        if show_labels and len(self.candidate_questions) <= 50:
            labels = [q[:20] + '...' if len(q) > 20 else q for q in self.candidate_questions]
        else:
            labels = None
        
        dendrogram_plot = dendrogram(
            self.linkage_matrix,
            labels=labels,
            leaf_rotation=90,
            leaf_font_size=8
        )
        
        # Optional user-supplied threshold line. Compare against None so a
        # legitimate threshold of 0 is still drawn (a bare `if max_d:` would
        # silently drop it).
        if max_d is not None:
            plt.axhline(y=max_d, c='red', linestyle='--', label=f'Threshold: {max_d}')
            plt.legend()
        
        # One dashed line per configured level (at most 5 colors available;
        # zip truncates extra levels).
        colors = ['red', 'blue', 'green', 'orange', 'purple']
        for i, (level, color) in enumerate(zip(self.levels, colors)):
            plt.axhline(y=level, c=color, linestyle='--', alpha=0.7,
                        label=f'Level {i + 1}: {level}')
        
        plt.title('Hierarchical Clustering Dendrogram', fontsize=16, fontweight='bold')
        plt.xlabel('Question Index', fontsize=12)
        plt.ylabel('Distance', fontsize=12)
        plt.legend()
        plt.tight_layout()
        
        if save_path:
            plt.savefig(save_path, dpi=300, bbox_inches='tight', 
                       facecolor='white', edgecolor='none')
            logger.info(f"Dendrogram saved to: {save_path}")
        
        plt.show()

    def plot_cluster_distribution(self, figsize: Tuple[int, int] = (15, 10), 
                                 save_path: Optional[str] = None):
        """
        Plot cluster-size bar charts for the first (up to four) clustering levels.

        Args:
            figsize: Figure size.
            save_path: When given, the figure is also saved to this path.

        Raises:
            ValueError: If clustering has not been performed yet.
        """
        if not self.level_clusters:
            raise ValueError("Please perform clustering first")
        
        # Fixed 2x2 grid — at most the first 4 levels are shown.
        fig, axes = plt.subplots(2, 2, figsize=figsize)
        axes = axes.flatten()
        
        for level_idx in range(min(len(self.levels), 4)):
            ax = axes[level_idx]
            
            clusters_info = self.level_clusters[level_idx]
            cluster_sizes = [info['size'] for info in clusters_info['clusters'].values()]
            cluster_ids = list(clusters_info['clusters'].keys())
            
            # Plot cluster size distribution, one color per cluster.
            bars = ax.bar(range(len(cluster_sizes)), cluster_sizes, 
                         color=plt.cm.Set3(np.linspace(0, 1, len(cluster_sizes))))
            
            ax.set_title(f'Level {level_idx} (Threshold: {self.levels[level_idx]})', 
                        fontsize=12, fontweight='bold')
            ax.set_xlabel('Cluster ID')
            ax.set_ylabel('Cluster Size')
            ax.set_xticks(range(len(cluster_ids)))
            ax.set_xticklabels([str(cid) for cid in cluster_ids])
            
            # Annotate each bar with its cluster size.
            for bar, size in zip(bars, cluster_sizes):
                height = bar.get_height()
                ax.text(bar.get_x() + bar.get_width()/2., height + 0.1,
                       f'{size}', ha='center', va='bottom')
        
        # Hide subplots beyond the number of configured levels.
        for i in range(len(self.levels), 4):
            axes[i].set_visible(False)
        
        plt.suptitle('Multi-level Clustering Distribution', fontsize=16, fontweight='bold')
        plt.tight_layout()
        
        if save_path:
            plt.savefig(save_path, dpi=300, bbox_inches='tight')
            logger.info(f"Cluster distribution plot saved to: {save_path}")
        
        plt.show()

    def save_model(self, filepath: str):
        """
        Persist the retriever's state to disk with pickle.

        The live SentenceTransformer objects are intentionally excluded;
        load_model() re-loads them by name.

        Args:
            filepath: Destination file path.
        """
        # All attributes needed to restore the retriever, in the same order
        # they were originally serialized.
        state_attrs = [
            'model_names', 'levels', 'level_weights', 'linkage_method',
            'outlier_detection', 'outlier_threshold', 'model_fusion_method',
            'candidate_questions', 'candidate_scores', 'candidate_embeddings',
            'outlier_mask', 'linkage_matrix', 'level_clusters', 'level_labels',
            'tuning_history',
        ]
        model_data = {name: getattr(self, name) for name in state_attrs}

        with open(filepath, 'wb') as f:
            pickle.dump(model_data, f)

        logger.info(f"✅ 模型已保存到: {filepath}")

    def load_model(self, filepath: str):
        """
        Restore retriever state previously written by save_model().

        NOTE(review): pickle.load executes arbitrary code from the file —
        only load model files from trusted sources.

        Args:
            filepath: Path to the pickled state file.
        """
        with open(filepath, 'rb') as f:
            model_data = pickle.load(f)

        # Mirror of the attribute list written by save_model(); a missing key
        # raises KeyError, signalling an incompatible/corrupt file.
        for name in ('model_names', 'levels', 'level_weights', 'linkage_method',
                     'outlier_detection', 'outlier_threshold',
                     'model_fusion_method', 'candidate_questions',
                     'candidate_scores', 'candidate_embeddings', 'outlier_mask',
                     'linkage_matrix', 'level_clusters', 'level_labels',
                     'tuning_history'):
            setattr(self, name, model_data[name])

        # Embedding models are not pickled; re-load them by name.
        self._load_models()

        logger.info(f"✅ 模型已从 {filepath} 加载")

    def get_statistics(self) -> Dict:
        """
        Summarize the clustering state.

        Returns:
            Statistics dict with total question count, level count, outlier
            count, and per-level cluster-size stats; or an error dict when
            clustering has not been performed yet.
        """
        if not self.level_clusters:
            return {"error": "请先执行聚类"}

        n_outliers = int(np.sum(self.outlier_mask)) if self.outlier_mask is not None else 0

        per_level = {}
        for level_idx, clusters_info in self.level_clusters.items():
            sizes = [c['size'] for c in clusters_info['clusters'].values()]
            per_level[f"level_{level_idx}"] = {
                "threshold": self.levels[level_idx],
                "n_clusters": clusters_info['n_clusters'],
                "avg_cluster_size": np.mean(sizes),
                "max_cluster_size": np.max(sizes),
                "min_cluster_size": np.min(sizes),
            }

        return {
            "total_questions": len(self.candidate_questions),
            "levels": len(self.levels),
            "outliers": n_outliers,
            "level_stats": per_level,
        }