import numpy as np
from scipy.spatial.distance import pdist, squareform
import matplotlib.pyplot as plt
from sklearn.datasets import make_blobs

def generate_test_data():
    """Generate three Gaussian blobs plus uniform background noise.

    Returns:
        data: (330, 2) array — 300 blob points followed by 30 noise points.
        true_labels: (330,) array — blob labels 0, 1, 2; noise marked -1.
    """
    centers = [[0, 0], [3, 3], [6, 0]]
    X, y = make_blobs(n_samples=300, centers=centers, cluster_std=0.5, random_state=42)

    # Bounding box (padded by 1) in which the uniform noise is scattered.
    x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
    y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1

    # BUG FIX: the original drew noise from the unseeded global np.random
    # state, so the "test data" changed on every run even though the blobs
    # were fixed with random_state=42. Use a seeded generator instead.
    rng = np.random.default_rng(42)
    noise_x = rng.uniform(x_min, x_max, size=30)
    noise_y = rng.uniform(y_min, y_max, size=30)
    noise = np.column_stack((noise_x, noise_y))

    data = np.vstack([X, noise])
    # Noise points carry label -1, matching the clusterer's noise convention.
    true_labels = np.hstack([y, np.full(30, -1)])
    return data, true_labels

def find_optimal_dc(distances):
    """Select the cutoff distance dc by minimizing the information entropy
    of the Gaussian-kernel density distribution.

    Args:
        distances: (n, n) symmetric pairwise-distance matrix.

    Returns:
        The entropy-minimizing candidate scaled by 3 / sqrt(2).
    """
    # Candidate dc values span the 1st..30th percentile of the unique
    # (upper-triangle) pairwise distances.
    upper = distances[np.triu_indices_from(distances, k=1)]
    lo, hi = np.percentile(upper, 1), np.percentile(upper, 30)
    candidates = np.linspace(lo, hi, 50)

    best_dc, min_entropy = candidates[0], float('inf')

    for dc in candidates:
        # A non-positive cutoff would divide by zero below.
        if dc <= 0:
            continue

        # Gaussian-kernel density per point, normalized into a probability
        # distribution over points (epsilon guards an all-zero sum).
        density = np.exp(-(distances / dc) ** 2).sum(axis=1)
        p = density / (density.sum() + 1e-10)

        # Shannon entropy of that distribution (epsilon guards log 0).
        current_entropy = -(p * np.log(p + 1e-10)).sum()

        if current_entropy < min_entropy:
            min_entropy, best_dc = current_entropy, dc

    # Empirical scaling: final dc is 3/sqrt(2) times the entropy minimizer.
    return best_dc * 3 / np.sqrt(2)

def density_peak_clustering(data, n_clusters=3, noise_percent=5):
    """Density Peak Clustering with an entropy-selected cutoff distance.

    Args:
        data: (n, d) array of points.
        n_clusters: number of cluster centers picked from the decision graph.
        noise_percent: points whose density falls below this percentile are
            relabeled as noise (-1); 0 disables noise marking.

    Returns:
        labels: (n,) cluster id per point (-1 = noise / unassigned).
        centers: indices of the chosen cluster centers.
        rho: (n,) Gaussian-kernel local density.
        delta: (n,) distance to the nearest point of higher density.
        dc: the cutoff distance actually used.
    """
    distances = squareform(pdist(data))
    n = data.shape[0]

    # Cutoff distance chosen by entropy minimization.
    dc = find_optimal_dc(distances)
    print(f"Auto selected dc: {dc:.4f}")

    # Gaussian-kernel local density.
    rho = np.sum(np.exp(-(distances / dc) ** 2), axis=1)

    # For each point: delta = distance to its nearest higher-density point,
    # nearest_higher = index of that point. Visiting points in order of
    # decreasing density makes order[:i] exactly the higher-density set.
    order = np.argsort(-rho)
    delta = np.zeros(n)
    nearest_higher = np.full(n, -1, dtype=int)

    # The global density peak has no higher-density neighbor; by convention
    # its delta is its largest distance to any other point.
    delta[order[0]] = np.max(distances[order[0]])

    # i starts at 1, so order[:i] is never empty here (the original's
    # len(dists) == 0 branch was unreachable).
    for i in range(1, n):
        current_idx = order[i]
        higher_rho = order[:i]
        dists = distances[current_idx, higher_rho]
        min_idx = np.argmin(dists)
        delta[current_idx] = dists[min_idx]
        nearest_higher[current_idx] = higher_rho[min_idx]

    # Cluster centers: the n_clusters points maximizing rho * delta.
    product = rho * delta
    centers = np.argsort(-product)[:n_clusters]

    # Seed labels at the centers, then propagate each remaining point's
    # label from its nearest higher-density point; descending-density order
    # guarantees the parent has been labeled before its children.
    labels = np.full(n, -1, dtype=int)
    for cluster_id, center in enumerate(centers):
        labels[center] = cluster_id

    for idx in order:
        # BUG FIX: the original indexed labels[nearest_higher[idx]] even when
        # nearest_higher was -1 (the global density peak, if it is not chosen
        # as a center), silently wrapping around to the LAST point's label.
        # Such a point now simply stays unassigned (-1).
        if labels[idx] == -1 and nearest_higher[idx] >= 0:
            labels[idx] = labels[nearest_higher[idx]]

    # Mark the lowest-density points as noise.
    if noise_percent > 0:
        rho_threshold = np.percentile(rho, noise_percent)
        labels[rho < rho_threshold] = -1

    return labels, centers, rho, delta, dc

# Generate test data (three blobs + uniform noise).
data, true_labels = generate_test_data()

# Run density peak clustering with the defaults (3 clusters, 5% noise cut).
labels, centers, rho, delta, optimal_dc = density_peak_clustering(data)

# Visualize results: four panels side by side.
plt.figure(figsize=(18, 6))

# Panel 1: ground-truth distribution (noise colored as label -1).
plt.subplot(1, 4, 1)
plt.scatter(data[:, 0], data[:, 1], c=true_labels, cmap='viridis', s=10)
plt.title("True Distribution")

# Panel 2: estimated local density per point.
plt.subplot(1, 4, 2)
plt.scatter(data[:, 0], data[:, 1], c=rho, cmap='viridis', s=10)
plt.colorbar(label='Density')
plt.title("Density Distribution")

# Panel 3: decision graph (rho vs delta); chosen centers marked in red.
plt.subplot(1, 4, 3)
plt.scatter(rho, delta, s=10)
plt.scatter(rho[centers], delta[centers], c='red', marker='X', s=100)
plt.xlabel('Density (rho)')
plt.ylabel('Delta')
plt.title(f"Decision Graph (dc={optimal_dc:.2f})")

# Panel 4: clustering result with centers overlaid.
plt.subplot(1, 4, 4)
plt.scatter(data[:, 0], data[:, 1], c=labels, cmap='viridis', s=10)
plt.scatter(data[centers, 0], data[centers, 1], c='red', marker='X', s=100)
plt.title("Clustering Result")

plt.tight_layout()
plt.show()
