import math
import random
from collections import defaultdict

def euclidean_distance(point1, point2):
    """Return the Euclidean (L2) distance between two points.

    Parameters
    ----------
    point1, point2 : sequences of numbers with equal length.

    Raises
    ------
    ValueError
        If the two points do not have the same number of coordinates.
    """
    if len(point1) != len(point2):
        raise ValueError("Points must have the same dimensions")
    # Sum of squared coordinate differences, accumulated left-to-right
    # exactly like an index loop would, then the square root.
    return math.sqrt(sum((a - b) ** 2 for a, b in zip(point1, point2)))

def assign_cluster(x, centroids):
    """Return the index of the centroid closest to point ``x``.

    Ties are broken in favor of the earliest centroid in ``centroids``.
    Raises ValueError if ``x`` and a centroid differ in dimension.
    """
    best_idx = 0
    best_dist = float('inf')
    for idx, centroid in enumerate(centroids):
        # Euclidean distance, computed inline (same dimension check and
        # accumulation order as euclidean_distance).
        if len(x) != len(centroid):
            raise ValueError("Points must have the same dimensions")
        dist = math.sqrt(sum((a - b) ** 2 for a, b in zip(x, centroid)))
        if dist < best_dist:
            best_dist, best_idx = dist, idx
    return best_idx

def initialize_centroids(data, k):
    """Pick ``k`` distinct data points uniformly at random as starting centroids."""
    chosen = random.sample(data, k)
    return chosen

def update_centroids(clusters, data_dim, k=None):
    """Recompute the centroid (mean point) of every cluster.

    Parameters
    ----------
    clusters : mapping of cluster index -> list of points.
    data_dim : dimensionality of the points.
    k : total number of clusters. Defaults to ``max(clusters) + 1`` so the
        signature stays backward compatible with callers that omit it.

    Returns
    -------
    list of centroids ordered by cluster index (element i is cluster i).

    Bug fixed: iterating ``clusters.items()`` yields centroids in dict
    *insertion* order (the order clusters first received a point), which
    scrambles the index -> centroid correspondence; and clusters that ended
    up empty have no key at all in a defaultdict, so they were silently
    dropped instead of re-seeded. Walking indices 0..k-1 fixes both.
    """
    if k is None:
        k = max(clusters) + 1 if clusters else 0

    new_centroids = []
    for cluster_idx in range(k):
        points = clusters.get(cluster_idx, [])
        if not points:
            # Empty cluster: re-seed it at a random position so k is preserved.
            new_centroids.append([random.random() for _ in range(data_dim)])
            continue

        # Mean of all points in the cluster.
        centroid = [0] * data_dim
        for point in points:
            for i in range(data_dim):
                centroid[i] += point[i]

        for i in range(data_dim):
            centroid[i] /= len(points)

        new_centroids.append(centroid)

    return new_centroids

def has_converged(old_centroids, new_centroids, epsilon):
    """Return True when every centroid moved by strictly less than ``epsilon``.

    Centroid lists are paired positionally; extra entries in the longer
    list are ignored (zip truncation), matching the original contract.
    """
    for prev, curr in zip(old_centroids, new_centroids):
        # Euclidean shift of this centroid, computed inline with the same
        # dimension check and accumulation order as euclidean_distance.
        if len(prev) != len(curr):
            raise ValueError("Points must have the same dimensions")
        shift = math.sqrt(sum((a - b) ** 2 for a, b in zip(prev, curr)))
        if shift >= epsilon:
            return False
    return True

def Kmeans(data, k, epsilon=1e-4, iteration=100):
    """Lloyd's k-means clustering.

    Parameters
    ----------
    data : list of points (lists of numbers), all of the same dimension.
    k : number of clusters; must be positive and at most ``len(data)``.
    epsilon : convergence threshold on centroid movement per iteration.
    iteration : maximum number of assign/update rounds.

    Returns
    -------
    (centroids, clusters, assignments, history) where ``clusters`` maps
    cluster index -> list of points, ``assignments[i]`` is the cluster of
    ``data[i]``, and ``history`` records centroids/assignments per round.

    Raises
    ------
    ValueError
        If k is not positive or there are fewer data points than k.
    """
    if k <= 0:
        raise ValueError("k must be positive")

    if len(data) < k:
        raise ValueError("Number of data points must be at least k")

    # 1. Initialize centroids from the data.
    centroids = initialize_centroids(data, k)
    data_dim = len(data[0])

    # Per-iteration history for inspection/debugging.
    history = {
        'centroids': [centroids.copy()],
        'assignments': []
    }

    # Bound before the loop so the return below is safe even when
    # iteration <= 0 (previously these names were only created inside the
    # loop body, leaving them unbound in that case).
    clusters = {i: [] for i in range(k)}
    assignments = []

    for iter_count in range(iteration):
        # 2. Assign each point to its nearest centroid. Pre-populating all
        # k indices (instead of using a defaultdict) keeps empty clusters
        # present in the mapping and guarantees key order 0..k-1, so
        # update_centroids returns centroids aligned with cluster indices.
        clusters = {i: [] for i in range(k)}
        assignments = []

        for point in data:
            cluster_idx = assign_cluster(point, centroids)
            clusters[cluster_idx].append(point)
            assignments.append(cluster_idx)

        history['assignments'].append(assignments.copy())

        # 3. Recompute each centroid as its cluster mean.
        new_centroids = update_centroids(clusters, data_dim)
        history['centroids'].append(new_centroids.copy())

        # 4. Stop once no centroid moved by epsilon or more.
        if has_converged(centroids, new_centroids, epsilon):
            print(f"算法在 {iter_count + 1} 次迭代后收敛")
            break

        centroids = new_centroids

    else:
        print(f"算法在 {iteration} 次迭代后未收敛")

    return centroids, clusters, assignments, history

# Test helpers
def generate_sample_data(n_points=100, centers=3, dim=2):
    """Create noisy Gaussian blobs around ``centers`` random cluster means.

    Returns (data, true_centers): roughly ``n_points`` points in total
    (``n_points // centers`` per blob) plus the generating means.
    """
    data, true_centers = [], []
    per_center = n_points // centers

    for _ in range(centers):
        # Random cluster mean with each coordinate in [0, 10).
        center = [random.uniform(0, 10) for _ in range(dim)]
        true_centers.append(center)

        # Scatter points around the mean with unit-variance Gaussian noise.
        data.extend(
            [coord + random.gauss(0, 1) for coord in center]
            for _ in range(per_center)
        )

    return data, true_centers

def calculate_sse(data, assignments, centroids):
    """Return the sum of squared Euclidean distances from each point to its
    assigned centroid (the k-means objective / SSE).

    Parameters
    ----------
    data : list of points.
    assignments : assignments[i] is the cluster index of data[i].
    centroids : list of centroids indexed by cluster.
    """
    sse = 0.0
    for point, cluster_idx in zip(data, assignments):
        centroid = centroids[cluster_idx]
        if len(point) != len(centroid):
            raise ValueError("Points must have the same dimensions")
        # Accumulate the squared distance directly: the previous
        # euclidean_distance(...) ** 2 took a square root only to undo it,
        # wasting work and precision.
        sse += sum((p - c) ** 2 for p, c in zip(point, centroid))
    return sse

# Demo entry point
if __name__ == "__main__":
    # Build a synthetic data set of three Gaussian blobs.
    print("生成测试数据...")
    data, true_centers = generate_sample_data(n_points=150, centers=3, dim=2)

    print(f"数据点数量: {len(data)}")
    print(f"数据维度: {len(data[0])}")

    # Cluster it with k-means.
    print("\n运行K均值聚类...")
    centroids, clusters, assignments, history = Kmeans(
        data, k=3, epsilon=0.001, iteration=100
    )

    # Report the resulting centroids and cluster sizes.
    print(f"\n聚类结果:")
    print(f"找到的质心数量: {len(centroids)}")
    for idx, centroid in enumerate(centroids):
        print(f"簇 {idx}: 质心位置 {[round(c, 3) for c in centroid]}, 包含 {len(clusters[idx])} 个点")

    # Quality metric: sum of squared errors.
    sse = calculate_sse(data, assignments, centroids)
    print(f"\n聚类误差平方和(SSE): {sse:.4f}")

    # Show where the first few points ended up.
    print(f"\n前10个数据点的簇分配:")
    for idx in range(min(10, len(data))):
        print(f"点 {idx}: {data[idx]} -> 簇 {assignments[idx]}")