# -*- coding: utf-8 -*-
"""
聚类分析模块 - 重构版本

提供多种聚类算法、评估指标和可视化功能
支持智能参数调优和自动最优聚类数量推荐
"""

import numpy as np
import pandas as pd
from typing import Dict, List, Any, Optional, Tuple, Union
from abc import ABC, abstractmethod
from dataclasses import dataclass, field
from enum import Enum
import warnings
warnings.filterwarnings('ignore')

# Machine learning libraries
from sklearn.cluster import KMeans, AgglomerativeClustering, DBSCAN, SpectralClustering
from sklearn.preprocessing import StandardScaler, MinMaxScaler, RobustScaler
from sklearn.metrics import silhouette_score, calinski_harabasz_score, davies_bouldin_score
from sklearn.decomposition import PCA
from sklearn.manifold import TSNE
from sklearn.neighbors import NearestNeighbors

# Import project configuration and logging utilities
import sys
import os
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))

try:
    from core.config import Config
    from utils.logger import LoggerMixin
except ImportError:
    # Minimal stand-ins so this module still works outside the full project.
    class Config:
        """Fallback config: a plain dict-backed key/value store."""
        def __init__(self):
            self.settings = {}

        def get(self, key, default=None):
            return self.settings.get(key, default)

    class LoggerMixin:
        """Fallback logger that writes level-prefixed lines to stdout."""
        def log_info(self, msg):
            print(f"INFO: {msg}")

        def log_warning(self, msg):
            print(f"WARNING: {msg}")

        def log_error(self, msg):
            print(f"ERROR: {msg}")


class ClusteringAlgorithm(Enum):
    """Supported clustering algorithms; values are the public string names
    accepted by ClusterAnalyzer.perform_clustering(algorithm=...)."""
    KMEANS = "kmeans"              # centroid-based K-means
    HIERARCHICAL = "hierarchical"  # agglomerative hierarchical clustering
    DBSCAN = "dbscan"              # density-based; labels noise points as -1
    SPECTRAL = "spectral"          # spectral clustering


class ScalingMethod(Enum):
    """Feature scaling strategies applied before clustering
    (mapped to sklearn scalers by DataPreprocessor)."""
    STANDARD = "standard"  # zero mean, unit variance (StandardScaler)
    MINMAX = "minmax"      # rescale to [0, 1] (MinMaxScaler)
    ROBUST = "robust"      # median/IQR based, outlier-resistant (RobustScaler)
    NONE = "none"          # pass raw feature values through unscaled


@dataclass
class ClusteringConfig:
    """Clustering configuration parameters.

    Algorithm-specific parameter dicts use ``field(default_factory=...)``
    so each instance owns its own dict (the previous ``= None`` defaults
    lied about the declared type and relied solely on ``__post_init__``).
    """
    algorithm: ClusteringAlgorithm = ClusteringAlgorithm.KMEANS
    n_clusters: int = 3
    scaling_method: ScalingMethod = ScalingMethod.STANDARD
    max_samples: int = 1000   # rows kept when sampling large inputs
    max_features: int = 10    # numeric columns auto-selected when none given
    random_state: int = 42
    enable_optimization: bool = True
    enable_visualization: bool = True

    # Algorithm-specific parameters (per-instance dicts, never shared).
    kmeans_params: Optional[Dict[str, Any]] = field(
        default_factory=lambda: {'n_init': 10, 'max_iter': 300, 'tol': 1e-4})
    dbscan_params: Optional[Dict[str, Any]] = field(
        default_factory=lambda: {'eps': 0.5, 'min_samples': 5})
    hierarchical_params: Optional[Dict[str, Any]] = field(
        default_factory=lambda: {'linkage': 'ward'})
    spectral_params: Optional[Dict[str, Any]] = field(
        default_factory=lambda: {'affinity': 'rbf', 'gamma': 1.0})

    def __post_init__(self):
        # Backward compatibility: callers may still pass None explicitly
        # to request the default parameter set.
        if self.kmeans_params is None:
            self.kmeans_params = {'n_init': 10, 'max_iter': 300, 'tol': 1e-4}
        if self.dbscan_params is None:
            self.dbscan_params = {'eps': 0.5, 'min_samples': 5}
        if self.hierarchical_params is None:
            self.hierarchical_params = {'linkage': 'ward'}
        if self.spectral_params is None:
            self.spectral_params = {'affinity': 'rbf', 'gamma': 1.0}


class BaseClusteringAlgorithm(ABC):
    """Abstract base for clustering algorithm wrappers."""

    def __init__(self, config: ClusteringConfig):
        self.config = config   # clustering configuration
        self.model = None      # fitted estimator, set by fit_predict
        self.labels_ = None    # labels from the last fit, set by fit_predict

    @abstractmethod
    def fit_predict(self, data: np.ndarray) -> np.ndarray:
        """Run clustering on *data* and return the label array."""
        pass

    @abstractmethod
    def get_cluster_centers(self) -> Optional[np.ndarray]:
        """Return cluster centers, or None if unavailable."""
        pass

    def get_model_params(self) -> Dict[str, Any]:
        """Return the fitted model's parameters, stringifying non-primitive values."""
        if not (self.model and hasattr(self.model, 'get_params')):
            return {}
        primitive = (int, float, str, bool, type(None))
        return {
            key: value if isinstance(value, primitive) else str(value)
            for key, value in self.model.get_params().items()
        }


class KMeansAlgorithm(BaseClusteringAlgorithm):
    """K-means clustering wrapper."""

    def fit_predict(self, data: np.ndarray) -> np.ndarray:
        """Fit K-means on *data* with the configured parameters and return labels."""
        merged = dict(self.config.kmeans_params,
                      n_clusters=self.config.n_clusters,
                      random_state=self.config.random_state)
        self.model = KMeans(**merged)
        self.labels_ = self.model.fit_predict(data)
        return self.labels_

    def get_cluster_centers(self) -> Optional[np.ndarray]:
        """Return the fitted centroid array, or None before fitting."""
        if not self.model:
            return None
        return self.model.cluster_centers_


class DBSCANAlgorithm(BaseClusteringAlgorithm):
    """DBSCAN density-based clustering wrapper."""

    def fit_predict(self, data: np.ndarray) -> np.ndarray:
        """Fit DBSCAN on *data* and return labels (-1 marks noise points)."""
        # Keep the data so centroids can be derived afterwards.
        self._data = data
        self.model = DBSCAN(**dict(self.config.dbscan_params))
        self.labels_ = self.model.fit_predict(data)
        return self.labels_

    def get_cluster_centers(self) -> Optional[np.ndarray]:
        """Return the mean point of each cluster (noise excluded), or None."""
        if self.labels_ is None or not hasattr(self, '_data'):
            return None

        # DBSCAN has no native centers: average the members of each cluster,
        # skipping the -1 noise label.
        cluster_ids = np.unique(self.labels_)
        cluster_ids = cluster_ids[cluster_ids != -1]
        if cluster_ids.size == 0:
            return None

        centroids = [self._data[self.labels_ == cid].mean(axis=0)
                     for cid in cluster_ids]
        return np.array(centroids)


class HierarchicalAlgorithm(BaseClusteringAlgorithm):
    """Agglomerative (hierarchical) clustering wrapper."""

    def fit_predict(self, data: np.ndarray) -> np.ndarray:
        """Fit agglomerative clustering on *data* and return labels."""
        merged = dict(self.config.hierarchical_params,
                      n_clusters=self.config.n_clusters)
        # Keep the data so centroids can be derived afterwards.
        self._data = data
        self.model = AgglomerativeClustering(**merged)
        self.labels_ = self.model.fit_predict(data)
        return self.labels_

    def get_cluster_centers(self) -> Optional[np.ndarray]:
        """Return the centroid of each cluster (the algorithm has no native centers)."""
        if self.labels_ is None or not hasattr(self, '_data'):
            return None

        centroids = [self._data[self.labels_ == cid].mean(axis=0)
                     for cid in np.unique(self.labels_)]
        return np.array(centroids) if centroids else None


class SpectralAlgorithm(BaseClusteringAlgorithm):
    """Spectral clustering wrapper."""

    def fit_predict(self, data: np.ndarray) -> np.ndarray:
        """Fit spectral clustering on *data* and return labels."""
        merged = dict(self.config.spectral_params,
                      n_clusters=self.config.n_clusters,
                      random_state=self.config.random_state)
        # Keep the data so centroids can be derived afterwards.
        self._data = data
        self.model = SpectralClustering(**merged)
        self.labels_ = self.model.fit_predict(data)
        return self.labels_

    def get_cluster_centers(self) -> Optional[np.ndarray]:
        """Return the centroid of each cluster (the algorithm has no native centers)."""
        if self.labels_ is None or not hasattr(self, '_data'):
            return None

        centroids = [self._data[self.labels_ == cid].mean(axis=0)
                     for cid in np.unique(self.labels_)]
        return np.array(centroids) if centroids else None





class ClusteringMetrics:
    """Computes evaluation metrics for a clustering result."""

    @staticmethod
    def calculate_all_metrics(data: np.ndarray, labels: np.ndarray,
                            sample_size: int = 1000) -> Dict[str, float]:
        """Compute silhouette, Calinski-Harabasz, Davies-Bouldin and inertia.

        Noise points (label -1) are excluded; large inputs are subsampled
        to *sample_size* points to keep the computation fast.
        """
        # Drop noise points before scoring.
        keep = labels != -1
        if keep.sum() < 2:
            return {"error": "有效聚类点太少，无法计算评估指标"}

        kept_data = data[keep]
        kept_labels = labels[keep]

        # At least two clusters are required for these metrics.
        if len(np.unique(kept_labels)) < 2:
            return {"error": "聚类数量不足，无法计算评估指标"}

        scores = {}
        try:
            # Subsample large inputs to speed up metric computation.
            if len(kept_data) > sample_size:
                idx = np.random.choice(len(kept_data), sample_size, replace=False)
                eval_data = kept_data[idx]
                eval_labels = kept_labels[idx]
                scores['note'] = f"基于{sample_size}个样本的估算值"
            else:
                eval_data = kept_data
                eval_labels = kept_labels

            # Silhouette coefficient.
            scores['silhouette_score'] = float(silhouette_score(eval_data, eval_labels))
            # Calinski-Harabasz index.
            scores['calinski_harabasz_score'] = float(calinski_harabasz_score(eval_data, eval_labels))
            # Davies-Bouldin index.
            scores['davies_bouldin_score'] = float(davies_bouldin_score(eval_data, eval_labels))
            # Within-cluster sum of squares.
            scores['inertia'] = ClusteringMetrics._calculate_inertia(eval_data, eval_labels)

        except Exception as e:
            scores['error'] = str(e)

        return scores

    @staticmethod
    def _calculate_inertia(data: np.ndarray, labels: np.ndarray) -> float:
        """Sum of squared distances from each point to its cluster centroid."""
        total = 0.0
        for cid in np.unique(labels):
            if cid == -1:  # noise points belong to no cluster
                continue
            members = data[labels == cid]
            if len(members) > 0:
                total += np.sum((members - members.mean(axis=0)) ** 2)
        return float(total)


class DataPreprocessor:
    """Prepares a DataFrame for clustering: feature selection, sampling,
    imputation and scaling."""

    def __init__(self, config: ClusteringConfig):
        self.config = config
        self.scaler = None         # fitted scaler, set by _scale_data
        self.feature_names = None  # features actually used for clustering

    def preprocess(self, data: pd.DataFrame, features: Optional[List[str]] = None) -> Tuple[np.ndarray, List[str], pd.DataFrame]:
        """Return (scaled feature matrix, used feature names, sampled DataFrame).

        Raises:
            ValueError: If no numeric features exist, a requested feature is
                missing, or all features are constant.
        """
        # Default to the first max_features numeric columns.
        if features is None:
            numeric_cols = data.select_dtypes(include=[np.number]).columns.tolist()
            if not numeric_cols:
                raise ValueError("没有可用于聚类的数值特征")
            features = numeric_cols[:self.config.max_features]

        # Reject feature names absent from the frame.
        missing_features = [f for f in features if f not in data.columns]
        if missing_features:
            raise ValueError(f"特征不存在: {missing_features}")

        self.feature_names = features

        # Sample the whole frame (not just the features) so row indices stay
        # aligned between the matrix and the returned DataFrame.
        if len(data) > self.config.max_samples:
            sampled = data.sample(n=self.config.max_samples, random_state=self.config.random_state)
        else:
            sampled = data.copy()

        # Extract the clustering features.
        matrix = sampled[features].copy()

        # Mean-impute missing values.
        if matrix.isnull().any().any():
            matrix = matrix.fillna(matrix.mean())

        # Constant columns carry no clustering signal; drop them everywhere.
        constant_cols = matrix.columns[matrix.std() == 0].tolist()
        if constant_cols:
            matrix = matrix.drop(columns=constant_cols)
            self.feature_names = [f for f in self.feature_names if f not in constant_cols]
            sampled = sampled.drop(columns=constant_cols)

        if matrix.empty:
            raise ValueError("所有特征都是常数，无法进行聚类分析")

        return self._scale_data(matrix), self.feature_names, sampled

    def _scale_data(self, data: pd.DataFrame) -> np.ndarray:
        """Scale the feature matrix per the configured method (or pass through)."""
        if self.config.scaling_method == ScalingMethod.NONE:
            return data.values

        self.scaler = {
            ScalingMethod.STANDARD: StandardScaler(),
            ScalingMethod.MINMAX: MinMaxScaler(),
            ScalingMethod.ROBUST: RobustScaler()
        }[self.config.scaling_method]
        return self.scaler.fit_transform(data)


class ClusterAnalyzer(LoggerMixin):
    """Clustering analyzer - main entry point.

    Wires together DataPreprocessor, the algorithm wrappers and
    ClusteringMetrics, and exposes a dict-based result API. Also offers
    optimal-cluster-count search and DBSCAN parameter tuning.
    """

    def __init__(self, config: Optional[Config] = None):
        super().__init__()
        self.config = config or Config()
        self.clustering_config = ClusteringConfig()
        self.preprocessor = None  # DataPreprocessor for the latest run
        self.algorithm = None     # algorithm wrapper for the latest run
        self.results = {}         # latest successful result dict

    def analyze(self, data: pd.DataFrame, features: Optional[List[str]] = None,
               algorithm: str = 'kmeans', n_clusters: int = 3, **kwargs) -> Dict[str, Any]:
        """Unified analysis interface; delegates to perform_clustering.

        Args:
            data: Input data.
            features: Feature columns to cluster on.
            algorithm: Clustering algorithm name.
            n_clusters: Number of clusters (ignored by DBSCAN).
            **kwargs: Additional configuration overrides.

        Returns:
            Clustering analysis result dict.
        """
        return self.perform_clustering(data, features, algorithm, n_clusters, **kwargs)

    def perform_clustering(self,
                          data: pd.DataFrame,
                          features: Optional[List[str]] = None,
                          algorithm: str = 'kmeans',
                          n_clusters: int = 3,
                          **kwargs) -> Dict[str, Any]:
        """Run the full clustering pipeline and return a result dict.

        Args:
            data: Input data.
            features: Feature columns to cluster on (auto-selected if None).
            algorithm: Clustering algorithm name.
            n_clusters: Number of clusters (ignored by DBSCAN).
            **kwargs: Additional configuration overrides.

        Returns:
            Result dict with labels, metrics and profiles on success,
            or {"error": ...} on failure.
        """
        try:
            # Apply the requested algorithm and per-call overrides.
            self._update_config(algorithm, n_clusters, **kwargs)

            # Preprocess: feature selection, sampling, imputation, scaling.
            self.preprocessor = DataPreprocessor(self.clustering_config)
            scaled_data, feature_names, sampled_data = self.preprocessor.preprocess(data, features)

            # Sanity-check data volume vs. requested cluster count.
            self._validate_data(scaled_data, sampled_data)

            # Build and run the algorithm wrapper.
            self.algorithm = self._create_algorithm()
            labels = self.algorithm.fit_predict(scaled_data)

            # Actual cluster count (DBSCAN's -1 noise label excluded).
            actual_clusters = len(set(labels)) - (1 if -1 in labels else 0)

            # Quality metrics on the scaled data.
            metrics = ClusteringMetrics.calculate_all_metrics(scaled_data, labels)

            # Per-cluster profiles on the sampled frame (same length as labels).
            cluster_profiles = self._analyze_clusters(sampled_data, labels, feature_names)

            # Optional 2D projection for plotting.
            visualization_data = self._prepare_visualization_data(scaled_data, labels) if self.clustering_config.enable_visualization else None

            result = {
                "success": True,
                "algorithm": algorithm,
                "n_clusters": actual_clusters,
                "features_used": feature_names,
                "data_size": len(data),
                "processed_size": len(scaled_data),
                "labels": labels.tolist(),
                "evaluation_metrics": metrics,
                "cluster_profiles": cluster_profiles,
                "visualization_data": visualization_data,
                "model_params": self.algorithm.get_model_params(),
                "cluster_centers": self._get_cluster_centers_info()
            }

            self.results = result
            self.log_info(f"聚类分析完成: {algorithm}, {actual_clusters}个聚类, 数据量: {len(data)}")

            return result

        except Exception as e:
            self.log_error(f"聚类分析失败: {str(e)}")
            return {"error": f"聚类分析失败: {str(e)}"}

    def find_optimal_clusters(self,
                            data: pd.DataFrame,
                            features: Optional[List[str]] = None,
                            max_clusters: int = 10,
                            algorithm: str = 'kmeans') -> Dict[str, Any]:
        """Search for the best cluster count by scoring k = 2..max_clusters.

        DBSCAN has no cluster-count parameter, so it is routed to
        _optimize_dbscan_params instead.
        """
        try:
            if algorithm == 'dbscan':
                return self._optimize_dbscan_params(data, features)

            results = []

            # Upper bound also limited by data size (at least 3 points per cluster).
            for k in range(2, min(max_clusters + 1, len(data) // 3)):
                cluster_result = self.perform_clustering(
                    data, features, algorithm, k, enable_visualization=False
                )

                if cluster_result.get('success'):
                    metrics = cluster_result['evaluation_metrics']
                    if 'error' not in metrics:
                        results.append({
                            'n_clusters': k,
                            'silhouette_score': metrics.get('silhouette_score', 0),
                            'calinski_harabasz_score': metrics.get('calinski_harabasz_score', 0),
                            'davies_bouldin_score': metrics.get('davies_bouldin_score', float('inf')),
                            'inertia': metrics.get('inertia', float('inf'))
                        })

            if not results:
                return {"error": "无法计算最优聚类数量"}

            # Derive per-metric and combined recommendations.
            recommendations = self._calculate_optimal_clusters(results)

            return {
                "success": True,
                "results": results,
                "recommendations": recommendations,
                "suggested_clusters": recommendations.get('综合推荐', 3)
            }

        except Exception as e:
            self.log_error(f"寻找最优聚类数量失败: {str(e)}")
            return {"error": f"寻找最优聚类数量失败: {str(e)}"}

    def _update_config(self, algorithm: str, n_clusters: int, **kwargs):
        """Apply algorithm choice, cluster count and kwargs overrides to the config.

        Raises:
            ValueError: If *algorithm* is not a supported name.
        """
        # String -> enum mapping.
        algorithm_map = {
            'kmeans': ClusteringAlgorithm.KMEANS,
            'hierarchical': ClusteringAlgorithm.HIERARCHICAL,
            'dbscan': ClusteringAlgorithm.DBSCAN,
            'spectral': ClusteringAlgorithm.SPECTRAL
        }

        if algorithm not in algorithm_map:
            raise ValueError(f"不支持的聚类算法: {algorithm}")

        self.clustering_config.algorithm = algorithm_map[algorithm]
        self.clustering_config.n_clusters = n_clusters

        # Generic configuration overrides.
        for key in ['max_samples', 'max_features', 'random_state', 'enable_visualization']:
            if key in kwargs:
                setattr(self.clustering_config, key, kwargs[key])

        # Algorithm-specific parameter-dict overrides.
        if algorithm == 'kmeans' and 'kmeans_params' in kwargs:
            self.clustering_config.kmeans_params.update(kwargs['kmeans_params'])
        elif algorithm == 'dbscan' and 'dbscan_params' in kwargs:
            self.clustering_config.dbscan_params.update(kwargs['dbscan_params'])
        elif algorithm == 'hierarchical' and 'hierarchical_params' in kwargs:
            self.clustering_config.hierarchical_params.update(kwargs['hierarchical_params'])
        elif algorithm == 'spectral' and 'spectral_params' in kwargs:
            self.clustering_config.spectral_params.update(kwargs['spectral_params'])

        # DBSCAN parameters may also be passed directly as eps / min_samples.
        if algorithm == 'dbscan':
            for param in ['eps', 'min_samples']:
                if param in kwargs:
                    self.clustering_config.dbscan_params[param] = kwargs[param]

    def _create_algorithm(self) -> BaseClusteringAlgorithm:
        """Instantiate the wrapper class matching the configured algorithm."""
        algorithm_map = {
            ClusteringAlgorithm.KMEANS: KMeansAlgorithm,
            ClusteringAlgorithm.DBSCAN: DBSCANAlgorithm,
            ClusteringAlgorithm.HIERARCHICAL: HierarchicalAlgorithm,
            ClusteringAlgorithm.SPECTRAL: SpectralAlgorithm
        }

        algorithm_class = algorithm_map.get(self.clustering_config.algorithm)
        if not algorithm_class:
            raise ValueError(f"不支持的聚类算法: {self.clustering_config.algorithm}")

        return algorithm_class(self.clustering_config)

    def _validate_data(self, scaled_data: np.ndarray, original_data: pd.DataFrame):
        """Validate data volume against the requested configuration.

        Raises:
            ValueError: If there are fewer than 3 points, or (for algorithms
                that take a cluster count) n_clusters >= number of points.
        """
        if len(scaled_data) < 3:
            raise ValueError("数据量太少，无法进行聚类分析")

        # DBSCAN does not take a preset cluster count.
        if self.clustering_config.algorithm not in [ClusteringAlgorithm.DBSCAN]:
            if self.clustering_config.n_clusters >= len(scaled_data):
                raise ValueError(f"聚类数({self.clustering_config.n_clusters})不能大于等于数据量({len(scaled_data)})")

    def _analyze_clusters(self, data: pd.DataFrame, labels: np.ndarray, features: List[str]) -> Dict[str, Any]:
        """Build per-cluster profiles: size, numeric stats and top categorical values."""
        cluster_profiles = {}
        data_with_labels = data.copy()
        data_with_labels['cluster'] = labels

        unique_labels = np.unique(labels)

        for label in unique_labels:
            if label == -1:  # DBSCAN noise points get their own summary entry
                mask = labels == label
                cluster_profiles["noise"] = {
                    "size": int(mask.sum()),
                    "percentage": float(mask.sum() / len(data) * 100),
                    "description": "噪声点"
                }
                continue

            mask = labels == label
            cluster_subset = data_with_labels[mask]

            if len(cluster_subset) == 0:
                continue

            profile = {
                "size": int(mask.sum()),
                "percentage": float(mask.sum() / len(data) * 100),
                "center": {},
                "statistics": {},
                "categorical_distribution": {}
            }

            # Numeric feature statistics (NaNs dropped per feature).
            for feature in features:
                if feature in cluster_subset.columns:
                    feature_data = cluster_subset[feature].dropna()
                    if len(feature_data) > 0:
                        profile["center"][feature] = float(feature_data.mean())
                        profile["statistics"][feature] = {
                            "mean": float(feature_data.mean()),
                            "std": float(feature_data.std()),
                            "min": float(feature_data.min()),
                            "max": float(feature_data.max()),
                            "median": float(feature_data.median()),
                            "q25": float(feature_data.quantile(0.25)),
                            "q75": float(feature_data.quantile(0.75))
                        }

            # Top-5 value shares for each categorical column.
            categorical_cols = data.select_dtypes(include=['object', 'category']).columns
            for col in categorical_cols:
                if col in cluster_subset.columns:
                    dist = cluster_subset[col].value_counts(normalize=True)
                    profile["categorical_distribution"][col] = {
                        str(k): float(v) for k, v in dist.head(5).to_dict().items()
                    }

            cluster_profiles[f"cluster_{label}"] = profile

        return cluster_profiles

    def _prepare_visualization_data(self, data: np.ndarray, labels: np.ndarray) -> Dict[str, Any]:
        """Project the data to 2D (PCA) for plotting; returns {"error": ...} on failure."""
        try:
            if data.shape[1] > 2:
                # PCA is used uniformly; t-SNE is avoided due to its long runtime.
                reducer = PCA(n_components=2, random_state=self.clustering_config.random_state)
                data_2d = reducer.fit_transform(data)
                method = "PCA"
                explained_variance = reducer.explained_variance_ratio_.tolist()
            else:
                data_2d = data
                method = "原始数据"
                explained_variance = None

            return {
                "coordinates": data_2d.tolist(),
                "labels": labels.tolist(),
                "method": method,
                "explained_variance_ratio": explained_variance,
                "dimensions": data_2d.shape[1]
            }

        except Exception as e:
            self.log_warning(f"准备可视化数据失败: {str(e)}")
            return {"error": str(e)}

    def _get_cluster_centers_info(self) -> Optional[Dict[str, Any]]:
        """Return cluster centers plus the feature names they refer to, if available."""
        if not self.algorithm:
            return None

        centers = self.algorithm.get_cluster_centers()
        if centers is None:
            return None

        return {
            "centers": centers.tolist(),
            "feature_names": self.preprocessor.feature_names if self.preprocessor else None
        }

    def _optimize_dbscan_params(self, data: pd.DataFrame, features: Optional[List[str]] = None) -> Dict[str, Any]:
        """Tune DBSCAN eps/min_samples via a k-distance heuristic plus grid search."""
        try:
            # Preprocess exactly like a normal clustering run.
            preprocessor = DataPreprocessor(self.clustering_config)
            scaled_data, feature_names, _ = preprocessor.preprocess(data, features)

            # Estimate eps from the sorted k-distance curve (k=4 or 5 is customary).
            k = 5
            nbrs = NearestNeighbors(n_neighbors=k).fit(scaled_data)
            distances, _ = nbrs.kneighbors(scaled_data)
            distances = np.sort(distances[:, k-1], axis=0)

            # Crude knee detection: the largest jump in the sorted distances.
            diffs = np.diff(distances)
            knee_point = np.argmax(diffs) if len(diffs) > 0 else len(distances) // 2
            suggested_eps = distances[knee_point]

            # Grid-search around the suggested eps.
            eps_values = [suggested_eps * 0.5, suggested_eps, suggested_eps * 1.5]
            min_samples_values = [3, 5, 10]

            results = []
            for eps in eps_values:
                for min_samples in min_samples_values:
                    if min_samples >= len(scaled_data):
                        continue

                    dbscan = DBSCAN(eps=eps, min_samples=min_samples)
                    labels = dbscan.fit_predict(scaled_data)

                    n_clusters = len(set(labels)) - (1 if -1 in labels else 0)
                    n_noise = list(labels).count(-1)

                    if n_clusters > 0:
                        metrics = ClusteringMetrics.calculate_all_metrics(scaled_data, labels)
                        if 'error' not in metrics:
                            results.append({
                                'eps': eps,
                                'min_samples': min_samples,
                                'n_clusters': n_clusters,
                                'n_noise': n_noise,
                                'noise_ratio': n_noise / len(scaled_data),
                                'silhouette_score': metrics.get('silhouette_score', 0)
                            })

            if not results:
                return {"error": "无法找到合适的DBSCAN参数"}

            # Pick the best trade-off between cluster quality and noise ratio.
            best_result = max(results, key=lambda x: x['silhouette_score'] - x['noise_ratio'] * 0.5)

            return {
                "success": True,
                "suggested_params": {
                    "eps": best_result['eps'],
                    "min_samples": best_result['min_samples']
                },
                "all_results": results,
                "k_distance_analysis": {
                    "suggested_eps": suggested_eps,
                    "knee_point_index": knee_point
                }
            }

        except Exception as e:
            return {"error": f"DBSCAN参数优化失败: {str(e)}"}

    def _calculate_optimal_clusters(self, results: List[Dict]) -> Dict[str, int]:
        """Recommend a cluster count per metric, plus a majority-vote combination."""
        recommendations = {}

        # Silhouette: higher is better.
        best_silhouette = max(results, key=lambda x: x['silhouette_score'])
        recommendations['轮廓系数最优'] = best_silhouette['n_clusters']

        # Calinski-Harabasz: higher is better.
        best_calinski = max(results, key=lambda x: x['calinski_harabasz_score'])
        recommendations['Calinski-Harabasz最优'] = best_calinski['n_clusters']

        # Davies-Bouldin: lower is better.
        best_davies = min(results, key=lambda x: x['davies_bouldin_score'])
        recommendations['Davies-Bouldin最优'] = best_davies['n_clusters']

        # Elbow method on inertia (largest successive drop).
        if len(results) >= 3:
            inertias = [r['inertia'] for r in results]
            diffs = [inertias[i] - inertias[i+1] for i in range(len(inertias)-1)]
            if diffs:
                elbow_idx = diffs.index(max(diffs))
                recommendations['肘部法则'] = results[elbow_idx]['n_clusters']

        # Combined recommendation by majority vote across the metrics.
        votes = {}
        for rec in recommendations.values():
            votes[rec] = votes.get(rec, 0) + 1

        if votes:
            comprehensive = max(votes.items(), key=lambda x: x[1])[0]
            recommendations['综合推荐'] = comprehensive

        return recommendations


# Backward-compatibility factory
def create_cluster_analyzer(config: Optional[Config] = None) -> ClusterAnalyzer:
    """Create a ClusterAnalyzer instance (kept for backward compatibility)."""
    return ClusterAnalyzer(config)


# Convenience one-shot clustering helper
def quick_clustering(data: pd.DataFrame,
                    algorithm: str = 'kmeans',
                    n_clusters: int = 3,
                    features: Optional[List[str]] = None) -> Dict[str, Any]:
    """Run a one-shot clustering analysis with a fresh ClusterAnalyzer."""
    analyzer = ClusterAnalyzer()
    result = analyzer.perform_clustering(data, features, algorithm, n_clusters)
    return result