# -*- coding: utf-8 -*-
"""
DBSCAN聚类检索器
基于DBSCAN聚类算法的问题检索系统

作者: [您的姓名]
日期: 2024
"""

import os
from loguru import logger
from typing import List, Tuple, Dict, Optional
import numpy as np
from sentence_transformers import SentenceTransformer
from sklearn.cluster import DBSCAN
from sklearn.metrics.pairwise import cosine_similarity
from ...config import *  # 导入配置，包括镜像设置
from ...tools.cache_manager import cache_manager

# Use the global cache manager's directory as the download/load location
# for sentence-transformers models (shared across retriever instances).
model_cache_dir = cache_manager.cache_dir


class DBSCANClusteringRetriever:
    """
    Question retriever based on DBSCAN clustering.

    Candidate questions are first clustered with DBSCAN over their sentence
    embeddings (cosine distance); at query time, candidates from the cluster
    most similar to the query receive a score bonus. DBSCAN discovers the
    number of clusters automatically and labels outliers as noise (-1).

    Features:
    - Automatically determines the number of clusters
    - Identifies noise points (outlier questions)
    - Density-based clustering handles irregularly shaped clusters
    - Supports cluster visualization
    - Provides detailed cluster information
    """

    def __init__(self, model_name: str = 'paraphrase-multilingual-MiniLM-L12-v2',
                 eps: float = 0.15, min_samples: int = 2):
        """
        Initialize the DBSCAN retriever.

        Args:
            model_name: sentence-transformers model name.
            eps: DBSCAN eps parameter — neighborhood radius on the cosine
                distance matrix, so meaningful values lie in [0, 2].
            min_samples: DBSCAN min_samples parameter — minimum number of
                neighbors for a core point.
        """
        try:
            # Prefer a previously saved local copy to avoid re-downloading.
            cached_model_path = os.path.join(model_cache_dir, model_name.replace('/', '_'))
            if os.path.exists(cached_model_path):
                logger.info(f"🔄 从本地缓存加载模型: {model_name}")
                self.model = SentenceTransformer(cached_model_path)
            else:
                logger.info(f"📥 首次下载模型: {model_name}，将缓存到 {model_cache_dir}")
                self.model = SentenceTransformer(model_name, cache_folder=model_cache_dir)
                # Persist so the next construction takes the cache branch above.
                self.model.save(cached_model_path)
            logger.info(f"✅ 成功加载模型: {model_name}")
        except Exception as e:
            # Fallback: use a smaller model when the requested one fails to load.
            logger.warning(f"模型加载失败: {e}")
            logger.info("尝试使用备用模型...")
            backup_model = 'all-MiniLM-L6-v2'
            cached_backup_path = os.path.join(model_cache_dir, backup_model.replace('/', '_'))
            if os.path.exists(cached_backup_path):
                self.model = SentenceTransformer(cached_backup_path)
            else:
                self.model = SentenceTransformer(backup_model, cache_folder=model_cache_dir)
                self.model.save(cached_backup_path)

        self.eps = eps
        self.min_samples = min_samples
        self.candidate_questions: List[str] = []
        self.candidate_scores: List[float] = []
        self.candidate_embeddings = None  # np.ndarray after load_candidates()
        self.clusters = None              # dict: cluster label -> list of candidate indices
        self.cluster_labels = None        # np.ndarray of per-candidate labels (-1 = noise)
        self.dbscan_model = None          # fitted sklearn DBSCAN instance
        self.similarity_matrix = None     # pairwise cosine similarities of candidates

    def load_candidates(self, questions: List[str], scores: List[float] = None):
        """
        Load candidate questions, compute their embeddings and cluster them.

        Args:
            questions: candidate question list (must be non-empty).
            scores: optional per-question scores; defaults to 1.0 for each.

        Raises:
            ValueError: if questions is empty (would otherwise crash later
                inside embedding/clustering with an obscure error).
        """
        if not questions:
            raise ValueError("候选问题列表不能为空")

        self.candidate_questions = questions
        self.candidate_scores = scores or [1.0] * len(questions)

        # Compute embeddings once; recommend() reuses them for similarity.
        logger.info("正在计算候选问题的嵌入向量...")
        self.candidate_embeddings = self.model.encode(questions)
        logger.info(f"✅ 完成 {len(questions)} 个问题的嵌入向量计算")

        self._perform_clustering()

    def _perform_clustering(self):
        """
        Run DBSCAN on the candidate embeddings using a precomputed
        cosine-distance matrix and organize the resulting clusters.
        """
        logger.info(f"开始DBSCAN聚类 (eps={self.eps}, min_samples={self.min_samples})...")

        self.similarity_matrix = cosine_similarity(self.candidate_embeddings)

        # Convert similarity to distance: distance = 1 - similarity.
        # Cosine similarity lies in [-1, 1], so distances lie in [0, 2].
        distance_matrix = 1 - self.similarity_matrix

        # Clip to guard against tiny negative values from float round-off.
        distance_matrix = np.clip(distance_matrix, 0, 2)

        # A point's distance to itself is exactly 0.
        np.fill_diagonal(distance_matrix, 0)

        # metric='precomputed' makes DBSCAN consume the distance matrix directly.
        self.dbscan_model = DBSCAN(eps=self.eps, min_samples=self.min_samples, metric='precomputed')
        self.cluster_labels = self.dbscan_model.fit_predict(distance_matrix)

        # Aliases kept for external cluster-quality evaluation tooling.
        self.labels = self.cluster_labels
        self.embeddings = self.candidate_embeddings

        # Group candidate indices by cluster label (noise under key -1).
        self.clusters = {}
        for i, label in enumerate(self.cluster_labels):
            if label not in self.clusters:
                self.clusters[label] = []
            self.clusters[label].append(i)

        # Summary statistics (label -1 is noise, not a cluster).
        n_clusters = len(set(self.cluster_labels)) - (1 if -1 in self.cluster_labels else 0)
        n_noise = list(self.cluster_labels).count(-1)

        logger.info(f"✅ 聚类完成:")
        logger.info(f"   - 发现 {n_clusters} 个簇")
        logger.info(f"   - {n_noise} 个噪声点")
        logger.info(f"   - 簇大小分布: {[len(cluster) for label, cluster in self.clusters.items() if label != -1]}")

    def recommend(self, query_question: str, top_k: int = 5,
                  similarity_threshold: float = 0.2,
                  cluster_weight: float = 0.3,
                  return_detailed_scores: bool = False) -> List[Tuple]:
        """
        Recommend questions similar to the query.

        Args:
            query_question: the query question.
            top_k: number of recommendations to return.
            similarity_threshold: minimum cosine similarity to be considered.
            cluster_weight: bonus added to candidates in the best-matching cluster.
            return_detailed_scores: whether to include score components.

        Returns:
            If return_detailed_scores is False: list of
            (question, final_score, similarity) tuples.
            If True: list of
            (question, final_score, base_score, cluster_bonus, similarity) tuples.
            Sorted by final_score descending, truncated to top_k.

        Raises:
            ValueError: if load_candidates() has not been called.
        """
        if not self.candidate_questions:
            raise ValueError("请先使用load_candidates()加载候选问题")

        query_embedding = self.model.encode([query_question])

        # Similarity of the query to every candidate.
        similarities = cosine_similarity(query_embedding, self.candidate_embeddings)[0]

        # Cluster with the highest average similarity to the query (-1 if none).
        best_cluster = self._find_best_cluster(similarities)

        recommendations = []
        max_score = max(self.candidate_scores) if self.candidate_scores else 1.0
        if max_score == 0:
            # Guard against ZeroDivisionError when every candidate score is 0;
            # normalization then degenerates to using raw scores (all zeros).
            max_score = 1.0

        for i, similarity in enumerate(similarities):
            if similarity < similarity_threshold:
                continue

            question = self.candidate_questions[i]
            score = self.candidate_scores[i]

            # Base score: 70% similarity + 30% normalized original score.
            base_score = similarity * 0.7 + (score / max_score) * 0.3

            # Candidates in the most relevant cluster get an extra bonus.
            cluster_bonus = 0
            if best_cluster != -1 and self.cluster_labels[i] == best_cluster:
                cluster_bonus = cluster_weight

            final_score = base_score + cluster_bonus

            if return_detailed_scores:
                recommendations.append((question, final_score, base_score, cluster_bonus, similarity))
            else:
                recommendations.append((question, final_score, similarity))

        # Sort by combined score, best first.
        recommendations.sort(key=lambda x: x[1], reverse=True)

        return recommendations[:top_k]

    def _find_best_cluster(self, similarities: np.ndarray) -> int:
        """
        Find the cluster most relevant to the query.

        Args:
            similarities: query-to-candidate similarity vector (aligned with
                self.cluster_labels).

        Returns:
            Label of the cluster with the highest average similarity, or -1
            when no cluster beats an average similarity of 0 (noise ignored).
        """
        cluster_scores = {}

        for i, similarity in enumerate(similarities):
            cluster_label = self.cluster_labels[i]
            if cluster_label not in cluster_scores:
                cluster_scores[cluster_label] = []
            cluster_scores[cluster_label].append(similarity)

        best_cluster = -1
        best_score = 0

        for cluster_label, sims in cluster_scores.items():
            if cluster_label == -1:  # skip the noise bucket
                continue
            avg_similarity = np.mean(sims)
            if avg_similarity > best_score:
                best_score = avg_similarity
                best_cluster = cluster_label

        return best_cluster

    def get_cluster_info(self) -> Dict:
        """
        Return a summary of the clustering result.

        Returns:
            Dict keyed by cluster label (int, -1 for noise) with per-cluster
            'type', 'size', 'questions', 'avg_score' entries, plus the
            top-level string keys 'labels', 'n_clusters' and 'n_noise'.
            Empty dict when clustering has not been performed yet.
        """
        if self.clusters is None:
            return {}

        cluster_info = {}
        for label, indices in self.clusters.items():
            cluster_type = "噪声点" if label == -1 else f"簇 {label}"
            cluster_info[label] = {
                'type': cluster_type,
                'size': len(indices),
                'questions': [self.candidate_questions[i] for i in indices],
                'avg_score': np.mean([self.candidate_scores[i] for i in indices]) if self.candidate_scores else 1.0
            }

        # NOTE: string keys intentionally share the dict with int cluster keys;
        # callers rely on this flat layout.
        cluster_info.update({
            'labels': self.cluster_labels,
            'n_clusters': len(set(self.cluster_labels)) - (1 if -1 in self.cluster_labels else 0),
            'n_noise': list(self.cluster_labels).count(-1)
        })

        return cluster_info

    def optimize_parameters(self, eps_range: Tuple[float, float] = (0.1, 0.5),
                          min_samples_range: Tuple[int, int] = (2, 10),
                          n_trials: int = 20) -> Dict:
        """
        Random-search optimization of the DBSCAN parameters.

        Args:
            eps_range: inclusive search range for eps.
            min_samples_range: inclusive search range for min_samples.
            n_trials: number of random trials.

        Returns:
            Dict with 'best_params', 'best_score' and 'all_results'.

        Raises:
            ValueError: if candidates have not been loaded.
        """
        if self.candidate_embeddings is None:
            raise ValueError("请先加载候选问题")

        logger.info("开始参数优化...")

        best_params = None
        best_score = -1
        results = []

        # Distance matrix is trial-invariant — compute it once.
        similarity_matrix = cosine_similarity(self.candidate_embeddings)
        distance_matrix = 1 - similarity_matrix
        distance_matrix = np.clip(distance_matrix, 0, 2)
        np.fill_diagonal(distance_matrix, 0)

        for trial in range(n_trials):
            # Randomly sample parameters. np.random.randint excludes its high
            # bound, so add 1 to make min_samples_range inclusive as documented.
            eps = np.random.uniform(*eps_range)
            min_samples = np.random.randint(min_samples_range[0], min_samples_range[1] + 1)

            dbscan = DBSCAN(eps=eps, min_samples=min_samples, metric='precomputed')
            labels = dbscan.fit_predict(distance_matrix)

            n_clusters = len(set(labels)) - (1 if -1 in labels else 0)
            n_noise = list(labels).count(-1)

            if n_clusters == 0:  # no clusters formed
                score = 0
            else:
                # Heuristic quality score: prefer ~5 clusters, little noise,
                # and reasonably sized clusters (saturates at size 10).
                cluster_sizes = [list(labels).count(i) for i in set(labels) if i != -1]
                avg_cluster_size = np.mean(cluster_sizes) if cluster_sizes else 0
                noise_ratio = n_noise / len(labels)

                score = (1 / (1 + abs(n_clusters - 5))) * (1 - noise_ratio) * min(avg_cluster_size / 10, 1)

            results.append({
                'eps': eps,
                'min_samples': min_samples,
                'n_clusters': n_clusters,
                'n_noise': n_noise,
                'score': score
            })

            if score > best_score:
                best_score = score
                best_params = {'eps': eps, 'min_samples': min_samples}

        logger.info(f"✅ 参数优化完成，最优参数: {best_params}")

        return {
            'best_params': best_params,
            'best_score': best_score,
            'all_results': results
        }

    def plot_clusters(self, figsize: Tuple[int, int] = (12, 8), save_path: Optional[str] = None):
        """
        Visualize the clustering result via a 2D t-SNE projection.

        Args:
            figsize: figure size.
            save_path: optional path to save the figure.

        Raises:
            ValueError: if clustering has not been performed.
        """
        if self.cluster_labels is None:
            raise ValueError("请先进行聚类")

        try:
            # Local imports: matplotlib is an optional, plotting-only
            # dependency (it was previously referenced without being
            # imported, causing a NameError).
            import matplotlib.pyplot as plt
            from sklearn.manifold import TSNE

            # Project embeddings to 2D; perplexity must be < n_samples.
            tsne = TSNE(n_components=2, random_state=42, perplexity=min(30, len(self.candidate_embeddings)-1))
            embeddings_2d = tsne.fit_transform(self.candidate_embeddings)

            plt.figure(figsize=figsize)

            # One color per cluster label.
            unique_labels = set(self.cluster_labels)
            colors = plt.cm.Set1(np.linspace(0, 1, len(unique_labels)))

            for label, color in zip(unique_labels, colors):
                if label == -1:
                    # Noise points drawn as black crosses.
                    color = 'black'
                    marker = 'x'
                    label_name = '噪声点'
                else:
                    marker = 'o'
                    label_name = f'簇 {label}'

                mask = self.cluster_labels == label
                plt.scatter(embeddings_2d[mask, 0], embeddings_2d[mask, 1],
                           c=[color], marker=marker, label=label_name, alpha=0.7)

            plt.title(f'DBSCAN聚类结果 (eps={self.eps}, min_samples={self.min_samples})')
            plt.xlabel('t-SNE 维度 1')
            plt.ylabel('t-SNE 维度 2')
            plt.legend()
            plt.grid(True, alpha=0.3)

            if save_path:
                plt.savefig(save_path, dpi=300, bbox_inches='tight')
                logger.info(f"聚类图已保存到: {save_path}")

            plt.show()

        except ImportError:
            logger.error("需要安装matplotlib和scikit-learn来进行可视化")

    def plot_cluster_distribution(self, figsize: Tuple[int, int] = (10, 6), save_path: Optional[str] = None):
        """
        Plot a bar chart of cluster sizes (noise shown in red).

        Args:
            figsize: figure size.
            save_path: optional path to save the figure.

        Raises:
            ValueError: if clustering has not been performed.
            ImportError: if matplotlib is not installed.
        """
        if self.clusters is None:
            raise ValueError("请先进行聚类")

        # Local import: matplotlib is an optional, plotting-only dependency
        # (it was previously referenced without being imported).
        import matplotlib.pyplot as plt

        # Collect cluster sizes, real clusters first.
        cluster_sizes = []
        cluster_labels = []

        for label, indices in self.clusters.items():
            if label != -1:  # exclude noise here
                cluster_sizes.append(len(indices))
                cluster_labels.append(f'簇 {label}')

        # Append the noise bucket last so it sits at the end of the chart.
        if -1 in self.clusters:
            cluster_sizes.append(len(self.clusters[-1]))
            cluster_labels.append('噪声点')

        plt.figure(figsize=figsize)
        bars = plt.bar(cluster_labels, cluster_sizes)

        # Highlight the noise bar in red.
        for i, bar in enumerate(bars):
            if cluster_labels[i] == '噪声点':
                bar.set_color('red')
            else:
                bar.set_color('skyblue')

        plt.title('簇大小分布')
        plt.xlabel('簇标签')
        plt.ylabel('问题数量')
        plt.xticks(rotation=45)

        # Annotate each bar with its count.
        for i, size in enumerate(cluster_sizes):
            plt.text(i, size + 0.1, str(size), ha='center', va='bottom')

        plt.tight_layout()

        if save_path:
            plt.savefig(save_path, dpi=300, bbox_inches='tight')
            logger.info(f"分布图已保存到: {save_path}")

        plt.show()

    def export_clusters(self, filepath: str):
        """
        Export the clustering result to a UTF-8 text file.

        Args:
            filepath: destination file path.

        Raises:
            ValueError: if clustering has not been performed.
        """
        if self.clusters is None:
            raise ValueError("请先进行聚类")

        with open(filepath, 'w', encoding='utf-8') as f:
            f.write(f"DBSCAN聚类结果\n")
            f.write(f"参数: eps={self.eps}, min_samples={self.min_samples}\n")
            f.write(f"总问题数: {len(self.candidate_questions)}\n\n")

            for label, indices in self.clusters.items():
                if label == -1:
                    f.write(f"=== 噪声点 ({len(indices)}个) ===\n")
                else:
                    f.write(f"=== 簇 {label} ({len(indices)}个) ===\n")

                for i, idx in enumerate(indices):
                    f.write(f"{i+1}. {self.candidate_questions[idx]}\n")
                f.write("\n")

        logger.info(f"聚类结果已导出到: {filepath}")