import numpy as np
import pandas as pd
from sklearn.datasets import load_iris, load_wine, make_blobs
from sklearn.cluster import KMeans
from sklearn.metrics import pairwise_distances_argmin_min
from tabulate import tabulate
import matplotlib.pyplot as plt


# 计算局部密度
def compute_density(X, epsilon=0.5):
    """Return the local density of every sample in X.

    The density of a point is the number of samples (including the point
    itself, whose distance is 0) lying strictly within an epsilon-radius
    Euclidean neighborhood.

    Parameters
    ----------
    X : ndarray of shape (n_samples, n_features)
    epsilon : float, neighborhood radius.

    Returns
    -------
    ndarray of shape (n_samples,), float-valued neighbor counts.
    """
    neighbor_counts = [
        int(np.count_nonzero(np.linalg.norm(X - point, axis=1) < epsilon))
        for point in X
    ]
    return np.asarray(neighbor_counts, dtype=float)


# 初始化簇中心（智能初始化）
def smart_init(X, k, epsilon=0.5, threshold=0.1):
    """Density-aware initialization of k cluster centers.

    Greedily picks points in decreasing order of local density, accepting a
    point only if it is farther than `threshold` from every center chosen so
    far. If the separation constraint cannot yield k centers, the remaining
    slots are filled with the densest not-yet-chosen points, so the result
    always has exactly k rows.

    Parameters
    ----------
    X : ndarray of shape (n_samples, n_features)
    k : int, number of centers to return (must be <= n_samples).
    epsilon : float, radius passed to compute_density.
    threshold : float, minimum pairwise distance required between centers.

    Returns
    -------
    ndarray of shape (k, n_features).
    """
    density = compute_density(X, epsilon)
    # Indices sorted from densest to sparsest.
    order = np.argsort(density)[::-1]

    # Always seed with the single densest point, then greedily add
    # well-separated points in density order.  (The original version only
    # searched candidates ranked >= k and could silently return fewer than
    # k centers when none satisfied the threshold.)
    chosen = [order[0]]
    for idx in order[1:]:
        if len(chosen) == k:
            break
        if np.min(np.linalg.norm(X[idx] - X[chosen], axis=1)) > threshold:
            chosen.append(int(idx))

    # Fallback: guarantee exactly k centers even when the separation
    # constraint is infeasible — fill with the densest unchosen points.
    if len(chosen) < k:
        taken = set(chosen)
        leftovers = [int(i) for i in order if int(i) not in taken]
        chosen.extend(leftovers[: k - len(chosen)])

    return X[np.array(chosen)]


# 动态调整簇中心
def update_centroids(X, labels, density, k, prev_centroids=None):
    """Recompute cluster centers as density-weighted means of their members.

    Parameters
    ----------
    X : ndarray of shape (n_samples, n_features)
    labels : ndarray of shape (n_samples,), cluster index per sample.
    density : ndarray of shape (n_samples,), per-sample weights (local density).
    k : int, number of clusters.
    prev_centroids : optional ndarray of shape (k, n_features).
        When given, an empty cluster keeps its previous center instead of
        collapsing to the zero vector (the original — and default — behavior).

    Returns
    -------
    ndarray of shape (k, n_features).
    """
    centroids = np.zeros((k, X.shape[1]))
    for i in range(k):
        members = labels == i
        if np.any(members):
            # Weighted mean: sum(w_j * x_j) / sum(w_j).  Densities count the
            # point itself, so every weight is >= 1 and the sum is positive.
            centroids[i] = np.average(X[members], axis=0, weights=density[members])
        elif prev_centroids is not None:
            # Empty cluster: retain the old center rather than jumping to 0.
            centroids[i] = prev_centroids[i]
    return centroids


# K-Means 聚类（包括智能初始化和动态调整簇中心）
def improved_kmeans(X, k, max_iters=300, epsilon=0.5, threshold=0.1):
    """K-Means with density-based smart initialization and weighted updates.

    Parameters
    ----------
    X : ndarray of shape (n_samples, n_features)
    k : int, number of clusters.
    max_iters : int, iteration cap.
    epsilon : float, density-neighborhood radius.
    threshold : float, minimum separation between initial centers.

    Returns
    -------
    (centroids, labels, sse, n_iters) where sse is the sum of squared
    distances of each sample to its assigned center.
    """
    # Density-aware initial centers.
    centroids = smart_init(X, k, epsilon, threshold)

    # X never changes, so the densities are loop-invariant: compute once
    # instead of once per iteration (the original recomputed them in-loop).
    density = compute_density(X, epsilon)

    labels = np.zeros(X.shape[0], dtype=int)
    n_iters = 0
    for i in range(max_iters):
        n_iters = i + 1
        prev_labels = labels
        # Assign each sample to its nearest center.
        labels = np.argmin(
            np.linalg.norm(X[:, np.newaxis] - centroids, axis=2), axis=1
        )

        # Density-weighted center update.
        centroids = update_centroids(X, labels, density, k)

        # Converged when no assignment changed.
        if np.array_equal(labels, prev_labels):
            break

    # SSE = sum over samples of squared distance to the assigned center.
    # (The original wrapped the per-sample minimum distances in np.min,
    # which collapsed them to a single scalar — the "SSE" it reported was
    # just the smallest squared distance, not the sum.)
    sse = float(np.sum(np.linalg.norm(X - centroids[labels], axis=1) ** 2))
    return centroids, labels, sse, n_iters


# 绘制聚类结果的图示
def plot_clustering(X, labels, centroids, algorithm_name, dataset_name):
    """Scatter-plot a 2-D clustering result.

    Draws each cluster's members in a distinct color (first two features
    only), overlays the centers as black X markers, and shows the figure.
    """
    plt.figure(figsize=(8, 6))

    # One scatter call per cluster so each gets its own color and legend entry.
    n_clusters = int(np.max(labels)) + 1
    for cluster_id in range(n_clusters):
        members = labels == cluster_id
        plt.scatter(X[members, 0], X[members, 1], label=f'Cluster {cluster_id + 1}')

    # Cluster centers drawn last so they sit on top of the member points.
    plt.scatter(centroids[:, 0], centroids[:, 1], s=200, c='black', marker='X', label='Centroids')

    plt.title(f'{algorithm_name} - {dataset_name}')
    plt.xlabel('Feature 1')
    plt.ylabel('Feature 2')
    plt.legend()
    plt.grid(True)
    plt.show()


# 实验设置
def run_experiment(X, k, dataset_name):
    """Cluster X with three algorithms and collect their results.

    Runs standard K-Means, K-Means++, and the improved density-based
    K-Means, plotting each result and returning one
    [dataset, algorithm, SSE, iterations] row per algorithm.
    """
    records = []

    def _run_sklearn(label, model):
        # Fit an sklearn estimator, record its SSE/iteration count, and plot.
        model.fit(X)
        records.append([dataset_name, label, model.inertia_, model.n_iter_])
        plot_clustering(X, model.labels_, model.cluster_centers_, label, dataset_name)

    # 1. Standard K-Means (sklearn default initialization).
    _run_sklearn("K-Means", KMeans(n_clusters=k, random_state=42))

    # 2. Explicit K-Means++ initialization.
    _run_sklearn("K-Means++", KMeans(n_clusters=k, init="k-means++", random_state=42))

    # 3. Improved K-Means (density-based smart init + weighted updates).
    centers, labels, sse, n_iter = improved_kmeans(X, k)
    records.append([dataset_name, "Improved K-Means", sse, n_iter])
    plot_clustering(X, labels, centers, 'Improved K-Means', dataset_name)

    return records


# 运行实验并打印表格
def run_all_experiments():
    """Benchmark all three clustering algorithms on each dataset.

    Uses Iris and Wine (3 clusters each) plus a synthetic blob dataset
    (4 clusters), then prints a grid-formatted comparison table of
    SSE and iteration counts.
    """
    # (dataset object, target cluster count, display name)
    experiments = [
        (load_iris(), 3, "Iris"),
        (load_wine(), 3, "Wine"),
        (make_blobs(n_samples=500, centers=4, random_state=42), 4, "Synthetic Blobs"),
    ]

    summary = []
    for source, k, name in experiments:
        # sklearn Bunch objects expose .data; make_blobs returns an (X, y) tuple.
        features = source.data if hasattr(source, 'data') else source[0]
        summary.extend(run_experiment(features, k, name))

    # Render the collected rows as a grid table.
    table = pd.DataFrame(summary, columns=["Dataset", "Algorithm", "SSE", "Iterations"])
    print(tabulate(table, headers="keys", tablefmt="grid", showindex=False))


# Run all experiments only when executed as a script, not on import.
if __name__ == "__main__":
    run_all_experiments()
