import math
import random
from collections import defaultdict

def euclidean_distance(point1, point2):
    """Return the Euclidean (L2) distance between two points.

    Args:
        point1: Sequence of numeric coordinates.
        point2: Sequence of numeric coordinates, same length as point1.

    Returns:
        The Euclidean distance as a float.

    Raises:
        ValueError: If the points have different dimensionality.
    """
    if len(point1) != len(point2):
        raise ValueError("Points must have the same dimensions")

    # Pair coordinates with zip instead of indexing by range(len(...)).
    return math.sqrt(sum((a - b) ** 2 for a, b in zip(point1, point2)))

def assign_cluster(x, centroids):
    """Return the index of the centroid nearest to x.

    Ties are broken in favor of the lowest index; returns -1 when
    ``centroids`` is empty.
    """
    return min(
        range(len(centroids)),
        key=lambda idx: euclidean_distance(x, centroids[idx]),
        default=-1,
    )

def calculate_centroids(data, cluster_assignments, k):
    """Recompute each cluster's centroid from the current assignments.

    The centroid of a non-empty cluster is the per-dimension mean of its
    member points; an empty cluster falls back to a randomly chosen data
    point.
    """
    # Group the points by their assigned cluster id.
    members = defaultdict(list)
    for point, cid in zip(data, cluster_assignments):
        members[cid].append(point)

    centroids = []
    for cid in range(k):
        points = members[cid]
        if points:
            count = len(points)
            # zip(*points) transposes the points into per-dimension tuples.
            centroids.append([sum(coords) / count for coords in zip(*points)])
        else:
            # No members: reseed this centroid from a random data point.
            centroids.append(random.choice(data))

    return centroids

def has_converged(old_centroids, new_centroids, epsilon):
    """Return True when no centroid moved by more than ``epsilon``."""
    return all(
        euclidean_distance(before, after) <= epsilon
        for before, after in zip(old_centroids, new_centroids)
    )

def Kmeans(data, k, epsilon=1e-4, max_iterations=100):
    """K-means clustering.

    Args:
        data: Dataset; each element is one data point (list or tuple).
        k: Number of clusters.
        epsilon: Convergence threshold on centroid movement.
        max_iterations: Upper bound on the number of iterations.

    Returns:
        Tuple ``(centroids, cluster_assignments)``: the final centroid
        list and the cluster index assigned to each data point.

    Raises:
        ValueError: If there are fewer data points than clusters.
    """
    if len(data) < k:
        raise ValueError("Number of data points must be at least k")

    # Initialize centroids by sampling k distinct data points.
    centroids = random.sample(data, k)
    cluster_assignments = [0] * len(data)

    for iteration in range(1, max_iterations + 1):
        # Assignment step: attach every point to its nearest centroid.
        cluster_assignments = [assign_cluster(point, centroids) for point in data]

        # Update step: move centroids to the mean of their members.
        updated = calculate_centroids(data, cluster_assignments, k)

        # Stop once no centroid moved more than epsilon.
        converged = has_converged(centroids, updated, epsilon)
        centroids = updated

        print(f"Iteration {iteration}: converged = {converged}")
        if converged:
            break

    return centroids, cluster_assignments

# Demo: run K-means on three well-separated 2-D clusters.
if __name__ == "__main__":
    test_data = [
        [1, 1], [1, 2], [2, 1], [2, 2],  # cluster 1
        [8, 8], [8, 9], [9, 8], [9, 9],  # cluster 2
        [1, 8], [2, 8], [1, 9], [2, 9],  # cluster 3
    ]

    print("测试数据:")
    for idx, pt in enumerate(test_data):
        print(f"Point {idx}: {pt}")

    print("\n运行K-means聚类...")
    centroids, assignments = Kmeans(test_data, k=3, epsilon=0.001, max_iterations=100)

    print("\n最终结果:")
    print("质心:")
    for idx, centroid in enumerate(centroids):
        print(f"Cluster {idx}: {centroid}")

    print("\n聚类分配:")
    grouped = defaultdict(list)
    for pt, cid in zip(test_data, assignments):
        grouped[cid].append(pt)
        print(f"Point {pt} -> Cluster {cid}")

    print("\n按聚类分组:")
    for cid, pts in grouped.items():
        print(f"Cluster {cid}: {pts}")