import csv
import random
import math
from collections import defaultdict
from sklearn.preprocessing import StandardScaler


# Load the dataset, dropping the last column (assumed to be the label).
def load_dataset(filename):
    """Read a CSV file and return each non-empty row as a list of floats,
    excluding the trailing label column."""
    dataset = []
    with open(filename, 'r') as file:
        for row in csv.reader(file):
            if row:  # skip blank lines
                dataset.append([float(value) for value in row[:-1]])
    return dataset


# Euclidean distance between two points.
def euclidean_distance(point1, point2):
    """Return the Euclidean (L2) distance between two equal-length points.

    Extra trailing coordinates in the longer point are ignored (zip stops
    at the shorter sequence).
    """
    total = 0.0
    for a, b in zip(point1, point2):
        diff = a - b
        total += diff * diff
    return math.sqrt(total)


# k-means++ centroid initialization.
def k_means_plus_plus(data, k):
    """Pick k initial centroids using k-means++ seeding.

    The first centroid is drawn uniformly at random; each subsequent one is
    drawn with probability proportional to the SQUARED distance to its
    nearest already-chosen centroid (the standard k-means++ weighting — the
    original used the plain distance, which biases the seeding).

    Args:
        data: sequence of points (indexable rows).
        k: number of centroids to produce.

    Returns:
        A list of exactly k points taken from `data`.
    """
    centroids = [random.choice(data)]
    for _ in range(1, k):
        # D(x)^2 for every point: squared distance to its nearest centroid.
        weights = [min(euclidean_distance(point, c) for c in centroids) ** 2
                   for point in data]
        if sum(weights) > 0:
            # random.choices normalizes the weights internally, avoiding the
            # original's O(n^2) cumulative scan and its float-rounding gap
            # (r could exceed the last cumulative value, silently yielding
            # fewer than k centroids).
            centroids.append(random.choices(data, weights=weights, k=1)[0])
        else:
            # Every point coincides with an existing centroid; fall back to a
            # uniform draw (the original raised ZeroDivisionError here).
            centroids.append(random.choice(data))
    return centroids


# Assign every data point to its nearest centroid and return, for each
# centroid index, the list of indices of the points it owns.
def assign_clusters(data, centroids):
    """Group point indices by the index of their nearest centroid.

    Ties go to the lowest centroid index. Returns a defaultdict(list)
    mapping centroid index -> list of point indices.
    """
    clusters = defaultdict(list)
    for point_index, point in enumerate(data):
        distances = [euclidean_distance(point, centroid) for centroid in centroids]
        clusters[distances.index(min(distances))].append(point_index)
    return clusters


# Recompute each centroid as the mean of its assigned points.
def update_centroids(data, clusters, k=None):
    """Return the new centroid list, aligned with centroid indices.

    Args:
        data: sequence of points (indexable rows).
        clusters: mapping of centroid index -> list of point indices.
        k: total number of centroids; defaults to len(clusters). Passing it
           explicitly keeps empty clusters alive (re-seeded with a random
           point) instead of silently dropping them.

    Bug fix: the original iterated clusters.values() in dict *insertion*
    order (the order in which clusters first received a point), so the
    returned list did not line up index-for-index with the caller's previous
    centroid list. Iterating explicit indices restores the alignment.
    """
    count = len(clusters) if k is None else k
    new_centroids = []
    for index in range(count):
        members = clusters.get(index, [])
        if members:
            points = [data[i] for i in members]
            # Column-wise mean of the member points.
            new_centroids.append([sum(col) / len(points) for col in zip(*points)])
        else:
            # Empty cluster: re-seed with a random data point.
            new_centroids.append(random.choice(data))
    return new_centroids


# K-means clustering algorithm.
def k_means(data, k, max_iterations=100, tol=1e-4):
    """Cluster `data` into k groups (features standardized internally).

    Args:
        data: list of numeric feature rows.
        k: number of clusters.
        max_iterations: hard cap on Lloyd iterations.
        tol: convergence threshold on the largest centroid movement.

    Returns:
        (clusters, centroids): clusters maps centroid index -> list of
        ORIGINAL (un-scaled) points; centroids are the final positions
        mapped back to the original data space.
    """
    scaler = StandardScaler()
    data_scaled = scaler.fit_transform(data)  # standardize features

    centroids = k_means_plus_plus(data_scaled, k)
    clusters = {}  # defined up front so max_iterations <= 0 no longer NameErrors
    for _ in range(max_iterations):
        clusters = assign_clusters(data_scaled, centroids)
        # Re-key by centroid index 0..k-1, inserting empty lists for centroids
        # that attracted no points, so update_centroids returns the new list
        # aligned index-for-index with `centroids`; the original relied on
        # dict insertion order, mis-pairing centroids in the shift below.
        clusters = {i: clusters.get(i, []) for i in range(len(centroids))}
        new_centroids = update_centroids(data_scaled, clusters)

        # Converged once no centroid moved more than tol.
        centroid_shift = max(euclidean_distance(old, new)
                             for old, new in zip(centroids, new_centroids))
        if centroid_shift < tol:
            break

        centroids = new_centroids

    # Map point indices back to the original (un-scaled) data; drop empty
    # clusters to keep the original return shape.
    final_clusters = {key: [data[i] for i in indices]
                      for key, indices in clusters.items() if indices}

    return final_clusters, scaler.inverse_transform(centroids)


# Example usage
if __name__ == "__main__":
    filename = 'iris.data'
    try:
        data = load_dataset(filename)
        clusters, centroids = k_means(data, k=3)

        # Print clusters in centroid-index order so each printed centroid is
        # the one that actually owns the cluster (dict insertion order does
        # not match centroid order).
        for i, key in enumerate(sorted(clusters), start=1):
            cluster = clusters[key]
            # Show only the first 5 points of each cluster.
            print(f"Cluster {i} (Centroid: {centroids[key]}, Points: {len(cluster)}): {cluster[:5]}...")
    except FileNotFoundError:
        # Bug fix: the original f-string never interpolated the filename.
        print(f"File {filename} not found.")