import random
import math
import copy


# Euclidean distance between two equal-length numeric vectors.
def euclidean_distance(a, b):
    """Return the Euclidean (L2) distance between points *a* and *b*.

    Both arguments must be sequences of numbers of the same length
    (a ValueError is raised otherwise).
    """
    # math.dist (Python 3.8+) is C-implemented and scales intermediates
    # to avoid overflow/underflow — preferable to a hand-rolled sqrt-of-sum.
    return math.dist(a, b)


# Assign a sample to its nearest cluster center.
def assign_cluster(x, centers):
    """Return the index of the center closest to *x* by Euclidean distance.

    Ties go to the lowest index; an empty *centers* list yields -1.
    """
    return min(
        range(len(centers)),
        key=lambda idx: euclidean_distance(x, centers[idx]),
        default=-1,
    )


# KMeans main routine (Lloyd's algorithm).
def Kmeans(data, k, epsilon=1e-4, iteration=100):
    """Cluster *data* into *k* groups via Lloyd's algorithm.

    Args:
        data: list of equal-length numeric vectors (one per sample).
        k: number of clusters; must not exceed len(data).
        epsilon: stop once the total center movement falls below this.
        iteration: hard cap on the number of refinement passes.

    Returns:
        (centers, labels): the final k centers and, for every sample in
        *data*, the index of its nearest center.
    """
    # Seed the centers with k distinct rows drawn at random.
    centers = random.sample(data, k)

    for step in range(iteration):
        # Assignment step: bucket every sample under its nearest center.
        buckets = [[] for _ in range(k)]
        for point in data:
            buckets[assign_cluster(point, centers)].append(point)

        previous = copy.deepcopy(centers)

        # Update step: each non-empty bucket's center becomes its mean;
        # an empty bucket keeps its previous center.
        for idx, members in enumerate(buckets):
            if not members:
                continue
            size = len(members)
            centers[idx] = [sum(coords) / size for coords in zip(*members)]

        # Total distance the centers moved this pass.
        drift = sum(
            euclidean_distance(new, old) for new, old in zip(centers, previous)
        )
        print(f"Iteration {step + 1}: total center shift = {drift:.6f}")

        # Converged when the centers have essentially stopped moving.
        if drift < epsilon:
            print("Converged!")
            break

    # Label every sample against the final centers.
    return centers, [assign_cluster(point, centers) for point in data]


# A tiny two-dimensional demo data set.
data = [
    [1.0, 2.0],
    [1.5, 1.8],
    [5.0, 8.0],
    [8.0, 8.0],
    [1.0, 0.6],
    [9.0, 11.0],
]

# Cluster the points into two groups.
centers, labels = Kmeans(data, k=2, epsilon=1e-4, iteration=50)

print("最终聚类中心：", centers)
print("每个样本的簇标签：", labels)
