import random
import math
from collections import defaultdict

def euclidean_distance(p1, p2):
    """Return the Euclidean (L2) distance between two equal-length points.

    Args:
        p1: Sequence of numeric coordinates.
        p2: Sequence of numeric coordinates with the same dimensionality as p1.

    Returns:
        The non-negative float distance between p1 and p2.

    Raises:
        ValueError: If the two points have different numbers of dimensions.
    """
    if len(p1) != len(p2):
        raise ValueError("Points must have the same number of dimensions")

    # zip is safe here: the length check above guarantees no silent truncation.
    return math.sqrt(sum((a - b) ** 2 for a, b in zip(p1, p2)))


def assign_cluster(x, c):
    min_dist = float('inf')
    min_index = -1
    for i, centroid in enumerate(c):
        dist = euclidean_distance(x, centroid)
        if dist < min_dist:
            min_dist = dist
            min_index = i
    return min_index


def Kmeans(data, k, epsilon=1e-4, max_iterations=100):
    """Run Lloyd's k-means clustering over `data`.

    Args:
        data: List of equal-length numeric point sequences.
        k: Number of clusters; must satisfy 1 <= k <= len(data).
        epsilon: Convergence threshold on the total centroid displacement.
        max_iterations: Hard cap on the number of assign/update iterations.

    Returns:
        A (centroids, clusters, loss_history) tuple: the final centroid list,
        the per-point cluster index list, and the per-iteration squared-distance
        loss values.

    Raises:
        ValueError: If k is not positive or exceeds the number of data points.
    """
    # k == 0 would otherwise slip past the guard below and crash later with an
    # opaque IndexError on an empty centroid list; fail fast instead.
    if k <= 0:
        raise ValueError("K must be a positive integer.")
    if k > len(data):
        raise ValueError("K cannot be greater than the number of data points.")

    # Initialize centroids as copies of k distinct randomly chosen data points.
    initial_indices = random.sample(range(len(data)), k)
    centroids = [list(data[i]) for i in initial_indices]

    loss_history = []

    print(f"初始质心: {centroids}")

    for i in range(max_iterations):
        # Assignment step: index of the nearest centroid for every point.
        clusters = [assign_cluster(point, centroids) for point in data]

        # Loss is the sum of squared distances to each point's assigned centroid.
        loss = sum(
            euclidean_distance(point, centroids[clusters[point_idx]]) ** 2
            for point_idx, point in enumerate(data)
        )
        loss_history.append(loss)

        # Update step: group members once (O(n)) instead of rescanning all
        # points for each of the k clusters, then average per dimension.
        members = defaultdict(list)
        for point, cluster_idx in zip(data, clusters):
            members[cluster_idx].append(point)

        new_centroids = []
        for j in range(k):
            cluster_points = members[j]

            if not cluster_points:
                # Empty cluster: keep its previous centroid unchanged.
                new_centroids.append(centroids[j])
                continue

            num_points = len(cluster_points)
            new_centroids.append(
                [sum(dim_values) / num_points for dim_values in zip(*cluster_points)]
            )

        total_shift = sum(euclidean_distance(centroids[j], new_centroids[j]) for j in range(k))

        print(f"--- 迭代 {i + 1} ---")
        print(f"新质心: {new_centroids}")
        print(f"损失: {loss:.4f}, 质心总位移: {total_shift:.6f}")

        if total_shift < epsilon:
            print(f"\n算法在 {i + 1} 次迭代后收敛。")
            return new_centroids, clusters, loss_history

        centroids = new_centroids

    print(f"\n在 {max_iterations} 次迭代后未达到收敛阈值。")
    return centroids, clusters, loss_history


if __name__ == "__main__":
    dataset = [
        [1.0, 2.0], [1.2, 1.8], [0.8, 2.2], [1.1, 1.9],
        [5.0, 8.0], [5.2, 8.1], [4.8, 7.9], [5.1, 8.2],
        [9.0, 2.0], [9.2, 1.8], [8.8, 2.2], [9.1, 1.9],
        [5.0, 5.0], [5.2, 4.8], [4.8, 5.2], [5.1, 4.9], [4.9, 5.1]
    ]
    print(f'数据集大小： {len(dataset)}')

    K = 4
    EPSILON = 1e-4
    MAX_ITER = 100

    random.seed(0)

    final_centroids, final_clusters, loss_history = Kmeans(dataset, K, EPSILON, MAX_ITER)
    
    print("\n==================== 最终结果 ====================")
    print("最终质心:")
    for i, centroid in enumerate(final_centroids):
        print(f"  簇 {i + 1}: ({centroid[0]:.4f}, {centroid[1]:.4f})")

    print("\n聚类分配 (数据点索引 -> 簇索引):")
    for i, cluster_idx in enumerate(final_clusters):
        print(f"  点 {i} -> 簇 {cluster_idx}")

    print("\n损失函数历史:")
    for i, loss in enumerate(loss_history):
        print(f"  迭代 {i + 1}: {loss:.4f}")

