import numpy as np
import matplotlib.pyplot as plt
import pandas as pd

# Use the SimHei font so the Chinese axis labels/titles below render correctly,
# and keep the minus sign displayable when a non-ASCII font is active.
plt.rcParams['font.sans-serif'] = ['SimHei']
plt.rcParams['axes.unicode_minus'] = False


def select_initial_centroids(data, num_clusters):
    """Pick `num_clusters` distinct rows of `data` at random as starting centroids."""
    chosen_rows = np.random.choice(data.shape[0], num_clusters, replace=False)
    return data[chosen_rows]


def allocate_to_nearest_centroid(data_points, centroids):
    """Assign each data point to its nearest centroid.

    Parameters
    ----------
    data_points : ndarray of shape (n, d)
    centroids : ndarray of shape (k, d)

    Returns
    -------
    ndarray of shape (n,) — index of the nearest centroid for each point.
    """
    # Use squared Euclidean distance: sqrt is monotonic, so dropping it leaves
    # argmin unchanged and saves a full pass over the (n, k) distance matrix.
    squared_distances = ((data_points[:, np.newaxis, :] - centroids) ** 2).sum(axis=2)
    return np.argmin(squared_distances, axis=1)


def compute_new_centroids(data_points, assignments, num_clusters):
    """Recompute each cluster's centroid as the mean of its assigned points.

    Parameters
    ----------
    data_points : ndarray of shape (n, d)
    assignments : ndarray of shape (n,) — cluster index per point
    num_clusters : int

    Returns
    -------
    ndarray of shape (num_clusters, d) of updated centroids.
    """
    new_centroids = []
    for cluster_idx in range(num_clusters):
        assigned_points = data_points[assignments == cluster_idx]
        if len(assigned_points) > 0:
            new_centroids.append(assigned_points.mean(axis=0))
        else:
            # Re-seed an empty cluster with a random data point. The previous
            # code appended the zero vector, which contradicted its own comment
            # and could drag points toward a phantom cluster at the origin.
            new_centroids.append(data_points[np.random.randint(data_points.shape[0])])
    return np.array(new_centroids)


def perform_k_means_clustering(data_points, num_clusters, max_iters=300, convergence_threshold=1e-4):
    """Run Lloyd's K-Means algorithm on `data_points`.

    Parameters
    ----------
    data_points : ndarray of shape (n, d)
    num_clusters : int — number of clusters k
    max_iters : int — iteration cap
    convergence_threshold : float — stop when every centroid moves less than this

    Returns
    -------
    (centroids, cluster_assignments) : final (k, d) centroids and the (n,)
        cluster index of each point, consistent with the returned centroids.
    """
    centroids = select_initial_centroids(data_points, num_clusters)
    for _ in range(max_iters):
        previous_centroids = centroids
        cluster_assignments = allocate_to_nearest_centroid(data_points, centroids)
        centroids = compute_new_centroids(data_points, cluster_assignments, num_clusters)

        # Converged once every centroid moved less than the threshold.
        if np.all(np.linalg.norm(centroids - previous_centroids, axis=1) < convergence_threshold):
            break

    # Recompute assignments against the final centroids: in the loop, the last
    # assignment was made against the *previous* centroids, and the original
    # code left `cluster_assignments` undefined when max_iters <= 0.
    cluster_assignments = allocate_to_nearest_centroid(data_points, centroids)
    return centroids, cluster_assignments


def visualize_clustering_results(data_points, final_centroids, cluster_assignments, num_clusters):
    """Scatter-plot the clusters and their centroids (first two feature dimensions only)."""
    palette = ['blue', 'green', 'red', 'cyan', 'magenta', 'yellow', 'black']  # extend for more clusters
    figure, axes = plt.subplots()
    for idx in range(num_clusters):
        members = data_points[cluster_assignments == idx]
        axes.scatter(members[:, 0], members[:, 1],
                     c=palette[idx % len(palette)], label=f'簇 {idx + 1}')

    # Overlay the centroids as large red crosses.
    axes.scatter(final_centroids[:, 0], final_centroids[:, 1],
                 s=150, c='red', marker='X', label='质心')
    axes.set_title('数据集 K-Means 聚类分析')
    axes.set_xlabel('特征维度 1')
    axes.set_ylabel('特征维度 2')
    axes.legend()
    plt.show()


def conduct_k_means_analysis():
    """Download the UCI wine data set, run K-Means on it, and plot the clusters."""
    column_names = [
        'Class', 'Alcohol', 'Malic acid', 'Ash', 'Alcalinity of ash', 'Magnesium',
        'Total phenols', 'Flavanoids', 'Nonflavanoid phenols', 'Proanthocyanins',
        'Color intensity', 'Hue', 'OD280/OD315 of diluted wines', 'Proline'
    ]
    wine_url = 'https://archive.ics.uci.edu/ml/machine-learning-databases/wine/wine.data'
    dataset = pd.read_csv(wine_url, header=None, names=column_names)

    # Drop the leading 'Class' label column; cluster on the features only.
    feature_matrix = dataset.iloc[:, 1:].values

    # Three clusters — the data set is known to contain three wine cultivars.
    cluster_count = 3

    final_centroids, cluster_labels = perform_k_means_clustering(feature_matrix, cluster_count)
    visualize_clustering_results(feature_matrix, final_centroids, cluster_labels, cluster_count)


# Run the full analysis only when executed as a script (not on import).
if __name__ == "__main__":
    conduct_k_means_analysis()