import random
import math

def euclidean_distance(x1, x2):
    """Return the Euclidean (L2) distance between two point vectors.

    Dimensions are paired with ``zip``, so any extra trailing dimensions
    in the longer vector are silently ignored.
    """
    total = 0.0
    for a, b in zip(x1, x2):
        total += (a - b) ** 2
    return math.sqrt(total)

def init_centroids(data, k):
    """Choose ``k`` distinct points from ``data`` as the starting centroids.

    Selection is uniform without replacement; ``random.sample`` raises
    ``ValueError`` when ``k`` exceeds ``len(data)``.
    """
    return random.sample(data, k)

def assign_cluster(data, centroids):
    """Partition ``data`` by nearest centroid.

    Returns a dict mapping centroid index -> list of points for which
    that centroid is closest. Ties go to the lowest index (same
    first-minimum behavior as ``list.index(min(...))``).
    """
    clusters = {index: [] for index in range(len(centroids))}
    for point in data:
        nearest = min(
            range(len(centroids)),
            key=lambda i: euclidean_distance(point, centroids[i]),
        )
        clusters[nearest].append(point)
    return clusters

def update_centroids(clusters):
    """Return the mean point of each cluster, in cluster-index order.

    NOTE(review): an empty cluster yields an empty centroid ``[]``
    (``zip(*[])`` produces nothing to average), so callers must guard
    against degenerate clusters before measuring distances.
    """
    means = []
    for members in clusters.values():
        per_dimension = zip(*members)  # transpose points -> dimensions
        means.append([sum(values) / len(members) for values in per_dimension])
    return means

def kmeans(data, k, epsilon=1e-4, max_iterations=100):
    """Cluster ``data`` into ``k`` groups with Lloyd's algorithm.

    Args:
        data: list of equal-length numeric vectors.
        k: number of clusters; must not exceed ``len(data)``.
        epsilon: convergence threshold — stop when every centroid moves
            less than this between iterations.
        max_iterations: hard cap on the assign/update loop.

    Returns:
        ``(centroids, clusters)`` where ``clusters`` maps centroid index
        to the list of points assigned to it, computed against the
        returned (final) centroids.
    """
    # 1. Seed centroids with k random data points.
    centroids = init_centroids(data, k)

    for _ in range(max_iterations):
        # 2. Assign every point to its nearest centroid.
        clusters = assign_cluster(data, centroids)

        # 3. Recompute centroids as cluster means.
        # Bug fix: update_centroids yields an empty list for a cluster
        # that lost all its points; with zip-based distances such a
        # degenerate centroid would be "distance 0" to everything and
        # swallow the dataset. Keep the previous centroid instead.
        new_centroids = [
            new if new else old
            for new, old in zip(update_centroids(clusters), centroids)
        ]

        # 4. Stop once no centroid moved more than epsilon.
        shifts = [
            euclidean_distance(new, old)
            for new, old in zip(new_centroids, centroids)
        ]
        centroids = new_centroids
        if all(shift < epsilon for shift in shifts):
            break

    # Re-assign so the returned clusters match the returned centroids
    # (the loop's last assignment used the pre-update centroids).
    clusters = assign_cluster(data, centroids)
    return centroids, clusters

# Example: cluster a small 2-D dataset with K-means.
data = [
    [1.0, 2.0], [1.5, 1.8], [5.0, 8.0], [8.0, 8.0],
    [1.0, 0.6], [9.0, 11.0], [8.0, 2.0], [10.0, 2.0],
]

k = 3  # number of clusters to find
centroids, clusters = kmeans(data, k)

print("最终簇中心:", centroids)
print("每个簇的成员:")
for cluster_id, members in clusters.items():
    print(f"簇 {cluster_id}: {members}")
