import cv2
import numpy as np
import logging
from typing import Tuple, List, Union
from sklearn.cluster import KMeans, DBSCAN
from sklearn.metrics import silhouette_score
from sklearn.preprocessing import StandardScaler
from sklearn.neighbors import NearestNeighbors

# Logger setup
def setup_logger():
    """Return the module-wide 'KMeansClustering' logger, configured once.

    A stream handler with a timestamped format is attached only on the
    first call; repeated calls reuse the existing handler so log lines
    are never duplicated.
    """
    log = logging.getLogger('KMeansClustering')
    if log.handlers:
        return log
    log.setLevel(logging.INFO)
    stream = logging.StreamHandler()
    fmt = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
    stream.setFormatter(fmt)
    log.addHandler(stream)
    return log

logger = setup_logger()

def determine_optimal_clusters(data: np.ndarray, max_clusters: int = 10) -> int:
    """
    Determine the optimal number of clusters using the silhouette score.

    Candidate counts 2..max_clusters are each fitted with KMeans and scored;
    the count with the highest silhouette score wins. The winner is then
    reduced if it produced any cluster smaller than 1% of the samples.

    (Bug fix: a "color separation" score was previously computed and logged
    for every candidate but never used in the decision — that dead work has
    been removed.)

    Args:
        data: points to cluster, shape (n_samples, n_features)
        max_clusters: largest cluster count to try

    Returns:
        The chosen number of clusters (always >= 2).
    """
    logger.info("使用轮廓系数确定最优聚类数量...")

    # Subsample very large inputs so the O(n^2) silhouette computation stays tractable.
    n_samples = len(data)
    if n_samples > 20000:
        logger.info(f"数据点数量过多 ({n_samples})，进行采样...")
        indices = np.random.choice(n_samples, 20000, replace=False)
        data_sampled = data[indices]
        logger.info("采样后数据点数量: 20000")
    else:
        data_sampled = data

    silhouette_scores = []
    best_score = -1  # silhouette scores lie in [-1, 1], so any real score beats this
    best_n_clusters = 2

    n_samples = len(data_sampled)
    # Cap the candidate counts so every cluster averages at least 100 points.
    max_possible_clusters = min(max_clusters, n_samples // 100)
    logger.info(f"数据点数量: {n_samples}, 最大可能聚类数: {max_possible_clusters}")

    # Keep each fit so the size check below can reuse it without refitting.
    kmeans_results = {}

    for n_clusters in range(2, max_possible_clusters + 1):
        logger.info(f"尝试聚类数量: {n_clusters}")

        # Extra initialisations and iterations make KMeans more stable.
        kmeans = KMeans(n_clusters=n_clusters, init='k-means++', n_init=15, max_iter=500, random_state=42)
        cluster_labels = kmeans.fit_predict(data_sampled)
        kmeans_results[n_clusters] = (kmeans, cluster_labels)

        # Higher silhouette = tighter, better-separated clusters.
        score = silhouette_score(data_sampled, cluster_labels)
        silhouette_scores.append(score)
        logger.info(f"聚类数量 {n_clusters} 的轮廓系数: {score:.4f}")

        if score > best_score:
            best_score = score
            best_n_clusters = n_clusters
            logger.info(f"找到更好的聚类数量: {best_n_clusters}, 轮廓系数: {best_score:.4f}")

    # Zero or one candidate tested: nothing to compare, return the current best.
    if len(silhouette_scores) <= 1:
        logger.info(f"只有一个聚类数量被测试，使用 {best_n_clusters}")
        return best_n_clusters

    # Sanity check: reject a winner that contains near-empty clusters.
    _, labels_final = kmeans_results[best_n_clusters]
    cluster_sizes = [np.sum(labels_final == i) for i in range(best_n_clusters)]
    min_size = min(cluster_sizes)
    if min_size < n_samples * 0.01:  # smallest cluster below 1% of the samples
        logger.warning(f"选择的聚类数 {best_n_clusters} 中存在过小的聚类({min_size}点)，可能需要调整")

        # Fall back to the largest smaller count whose clusters are all big enough.
        if best_n_clusters > 2:
            for k in range(best_n_clusters - 1, 1, -1):
                if k in kmeans_results:
                    _, labels_k = kmeans_results[k]
                    cluster_sizes_k = [np.sum(labels_k == i) for i in range(k)]
                    if min(cluster_sizes_k) >= n_samples * 0.01:
                        best_n_clusters = k
                        logger.info(f"调整聚类数量至 {best_n_clusters}，以避免过小的聚类")
                        break

    logger.info(f"最终确定的最优聚类数量: {best_n_clusters}")
    return best_n_clusters

def apply_kmeans_clustering(image: np.ndarray, n_clusters=None) -> Union[Tuple[np.ndarray, List[np.ndarray], List[np.ndarray]], None]:
    """
    Apply improved K-means++ clustering to an image and produce layered results.

    Args:
        image: input image in BGR format
        n_clusters: number of clusters; determined automatically when None

    Returns:
        Union[Tuple[np.ndarray, List[np.ndarray], List[np.ndarray]], None]:
        - first element: the complete clustered result image
        - second element: list of layer images, one per cluster
        - third element: list of cluster masks, one per cluster
        - None when processing fails
    """
    try:
        # Reject missing / empty input.
        if image is None or image.size == 0:
            logger.error("输入图像无效")
            return None

        # Expect an H x W x C colour image.
        if len(image.shape) != 3:
            logger.error(f"输入图像维度不正确: {image.shape}")
            return None

        height, width = image.shape[:2]
        image_size_mb = image.nbytes / (1024 * 1024)
        logger.info(f"开始处理图像，尺寸: {width}x{height}, 大小: {image_size_mb:.2f}MB")

        # 1. Preprocessing
        logger.info("开始图像预处理...")

        # 1.1 Convert to LAB so colour distances are closer to perceptual distance.
        lab_image = cv2.cvtColor(image, cv2.COLOR_BGR2LAB)

        # 1.2 Bilateral filtering removes noise while preserving edges.
        denoised = cv2.bilateralFilter(lab_image, 9, 75, 75)

        # 1.3 Select the pixels to cluster from the edge map.
        # NOTE(review): 'edges' is used directly as the "non-edge" mask and the
        # pixels where it is > 0 are selected. If apply_canny_edge_detection
        # follows the usual Canny convention (255 AT edges), this selects edge
        # pixels instead — confirm the helper's return convention.
        from edge_detection import apply_canny_edge_detection
        edges = apply_canny_edge_detection(image)
        non_edge_mask = edges
        non_edge_indices = np.where(non_edge_mask.reshape(-1) > 0)[0]

        # 1.4 Cluster the denoised LAB pixels (bug fix: the bilateral-filtered
        # image was previously computed but never used — clustering read the
        # raw LAB image).
        pixels_to_cluster = denoised.reshape(-1, 3)[non_edge_indices]

        # 1.5 Standardise the colour channels.
        scaler = StandardScaler()
        pixels_scaled = scaler.fit_transform(pixels_to_cluster)

        # 1.6 Append normalised (x, y) coordinates so clusters stay spatially coherent.
        y_coords, x_coords = np.unravel_index(non_edge_indices, (height, width))
        spatial_info = np.column_stack((x_coords/width, y_coords/height))
        pixels_with_spatial = np.hstack((pixels_scaled, spatial_info))

        logger.info(f"需要聚类的像素数量: {len(pixels_to_cluster)}")

        if len(pixels_to_cluster) == 0:
            logger.error("没有非边缘像素可以聚类")
            return None

        if len(pixels_to_cluster) < 1000:
            logger.warning("像素数量太少，可能不适合聚类")
            return None

        # 2. Determine the number of clusters automatically when not given.
        if n_clusters is None:
            n_clusters = determine_optimal_clusters(pixels_with_spatial)
            logger.info(f"确定的最优聚类数量: {n_clusters}")

        # 3. Run K-means++ on the combined colour + spatial features.
        logger.info("开始K-means++聚类...")
        kmeans = KMeans(
            n_clusters=n_clusters,
            init='k-means++',
            n_init=20,  # more initialisations for stability
            max_iter=500,  # more iterations for convergence
            random_state=42
        )
        cluster_labels = kmeans.fit_predict(pixels_with_spatial)
        logger.info("完成K-means++聚类")

        # 4. Build the layered outputs.
        logger.info("创建分层结果...")
        layer_results = []
        layer_masks = []

        # Full-image label map; -1 marks pixels that were not clustered.
        label_map = np.full((height * width,), -1)
        label_map[non_edge_indices] = cluster_labels
        label_map = label_map.reshape(height, width)

        # Clustered result image, initialised to white.
        clustered_image = np.ones((height, width, 3), dtype=np.uint8) * 255

        # 5. Create and post-process one layer per cluster.
        for i in range(n_clusters):
            logger.info(f"处理第 {i+1}/{n_clusters} 个聚类")

            # 5.1 Binary mask of this cluster's pixels.
            mask = (label_map == i).astype(np.uint8) * 255

            # 5.2 Morphological clean-up at two kernel sizes.
            kernel_sizes = [(3,3), (5,5)]
            for kernel_size in kernel_sizes:
                kernel = np.ones(kernel_size, np.uint8)
                # Opening removes small specks...
                mask = cv2.morphologyEx(mask, cv2.MORPH_OPEN, kernel)
                # ...closing fills small holes.
                mask = cv2.morphologyEx(mask, cv2.MORPH_CLOSE, kernel)

            # 5.3 Drop connected components below 0.1% of the image area.
            num_labels, labels = cv2.connectedComponents(mask)
            for label in range(1, num_labels):
                component_size = np.sum(labels == label)
                if component_size < height * width * 0.001:
                    mask[labels == label] = 0

            # 5.4 Keep the cleaned mask.
            layer_masks.append(mask)

            # 5.5 White canvas for this cluster's layer.
            layer_image = np.full_like(image, 255)
            cluster_pixels = (mask == 255)

            # 5.6 Cluster-centre colour converted back to BGR (used for logging only).
            center_lab = kmeans.cluster_centers_[i][:3]  # colour channels only, spatial dims dropped
            center_lab = scaler.inverse_transform(center_lab.reshape(1, -1))[0]
            center_bgr = cv2.cvtColor(center_lab.reshape(1, 1, 3).astype(np.uint8), cv2.COLOR_LAB2BGR)[0, 0]

            # 5.7 Fill the layer with median-smoothed original colours.
            if np.any(cluster_pixels):
                region_colors = image[cluster_pixels]
                # NOTE(review): medianBlur on an Nx3 "image" mixes pixels that are
                # merely adjacent in the flattened array, not in the image —
                # confirm this is the intended smoothing.
                smoothed_colors = cv2.medianBlur(region_colors.reshape(-1, 3), 3).reshape(region_colors.shape)
                layer_image[cluster_pixels] = smoothed_colors
                clustered_image[cluster_pixels] = smoothed_colors

            logger.info(f"聚类 {i+1} 的中心颜色 (BGR): {center_bgr}")

            layer_results.append(layer_image)

        logger.info(f"成功完成聚类和分层处理，共 {n_clusters} 个聚类")
        return clustered_image, layer_results, layer_masks

    except Exception as e:
        logger.error(f"聚类过程中发生错误: {str(e)}")
        import traceback
        logger.error(traceback.format_exc())
        return None

if __name__ == "__main__":
    # Smoke test: build a synthetic image, cluster it, and dump the outputs.
    import os

    # Synthetic test image: coloured shapes on a white canvas.
    test_image = np.full((500, 500, 3), 255, dtype=np.uint8)
    cv2.rectangle(test_image, (100, 100), (300, 300), (0, 0, 255), -1)  # red rectangle
    cv2.rectangle(test_image, (350, 350), (450, 450), (0, 255, 0), -1)  # green rectangle
    cv2.circle(test_image, (200, 400), 50, (255, 0, 0), -1)  # blue circle
    cv2.circle(test_image, (400, 200), 50, (0, 255, 255), -1)  # yellow circle

    # Sprinkle in some Gaussian noise.
    noise = np.random.normal(0, 10, test_image.shape).astype(np.int16)
    test_image = np.clip(test_image.astype(np.int16) + noise, 0, 255).astype(np.uint8)

    # Run the K-means++ pipeline.
    result = apply_kmeans_clustering(test_image)
    if result is None:
        print("聚类处理失败")
    else:
        clustered_image, layer_results, layer_masks = result

        # Write outputs under results/.
        os.makedirs("results", exist_ok=True)
        cv2.imwrite("results/test_image.png", test_image)
        cv2.imwrite("results/clustered_image.png", clustered_image)

        # One image pair (layer + mask) per cluster.
        for i, (layer, mask) in enumerate(zip(layer_results, layer_masks)):
            cv2.imwrite(f"results/layer_{i+1}.png", layer)
            cv2.imwrite(f"results/mask_{i+1}.png", mask)

        print(f"聚类测试完成，共生成 {len(layer_results)} 个聚类，结果已保存到 results 目录")