import random
import math

def euclidean_distance(x1, x2):
    """Return the Euclidean (L2) distance between points x1 and x2.

    The points are paired coordinate-wise with zip, so extra trailing
    coordinates on the longer point are ignored (zip truncates).
    """
    total = 0.0
    for a, b in zip(x1, x2):
        diff = a - b
        total += diff * diff
    return math.sqrt(total)

def assign_cluster(x, centers):
    """Return the index of the center nearest to point x.

    Ties go to the lowest index (first minimum); an empty `centers`
    list yields -1, matching the sentinel of the original scan.
    """
    if not centers:
        return -1
    distances = [euclidean_distance(x, center) for center in centers]
    # list.index(min(...)) picks the FIRST occurrence of the minimum,
    # reproducing the strict-less-than tie-breaking of a manual scan.
    return distances.index(min(distances))

# K-means clustering (Lloyd's algorithm).
def Kmeans(data, k, epsilon=1e-4, max_iterations=300):
    """Cluster `data` into `k` groups with the K-means algorithm.

    Args:
        data: list of equal-length numeric sequences (the points).
        k: number of clusters; must satisfy 1 <= k <= len(data).
        epsilon: stop once the total movement of all centers in a
            single iteration drops below this threshold.
        max_iterations: hard cap on the number of refinement passes.

    Returns:
        A tuple ``(centers, clusters)`` where ``centers`` is the list of
        k center coordinates and ``clusters`` maps cluster index to the
        list of points assigned to it.

    Raises:
        ValueError: if k is outside [1, len(data)] — random.sample would
            otherwise fail with a less descriptive message.
    """
    if not 1 <= k <= len(data):
        raise ValueError(f"k must be in [1, {len(data)}], got {k}")

    # 1. Pick k distinct data points as the initial centers.
    centers = random.sample(data, k)

    # Pre-bind clusters so the final `return` is valid even when
    # max_iterations <= 0 (the original raised UnboundLocalError there).
    clusters = {i: [] for i in range(k)}

    for iteration in range(max_iterations):
        # 2. Assign every point to its nearest current center.
        clusters = {i: [] for i in range(k)}
        for point in data:
            clusters[assign_cluster(point, centers)].append(point)

        # 3. Recompute each center as the mean of its members;
        #    an empty cluster keeps its previous center unchanged.
        new_centers = []
        for i in range(k):
            if clusters[i]:
                # zip(*points) groups the j-th coordinate of every member,
                # so each `coords` tuple averages one dimension.
                new_centers.append(
                    [sum(coords) / len(coords) for coords in zip(*clusters[i])]
                )
            else:
                new_centers.append(centers[i])

        # 4. Converged when the centers barely moved this pass.
        movement = sum(
            euclidean_distance(new_centers[i], centers[i]) for i in range(k)
        )
        if movement < epsilon:
            print(f"Converged after {iteration} iterations.")
            # NOTE: as in the original, the final sub-epsilon update is
            # discarded — the pre-update centers are returned.
            break

        centers = new_centers

    return centers, clusters

# Example data: nine 2-D points in three rough groups.
data = [
    [1.0, 2.0],
    [1.5, 1.8],
    [5.0, 8.0],
    [8.0, 8.0],
    [1.0, 0.6],
    [9.0, 11.0],
    [8.0, 2.0],
    [10.0, 2.0],
    [9.0, 3.0]
]

# Run the demo only when executed as a script, so importing this module
# no longer triggers clustering and printing as an import-time side effect.
if __name__ == "__main__":
    # Cluster the example data with K-means (k = 3).
    k = 3
    centers, clusters = Kmeans(data, k)

    print("聚类中心:", centers)
    print("簇的分配:")
    for i, cluster in clusters.items():
        print(f"簇 {i}: {cluster}")
