import random
import math

def load_dataset(file_path):
    """Read comma-separated samples from file_path.

    Each valid row has exactly 5 fields: 4 numeric features followed by a
    non-empty label. The label is dropped; only the 4 feature values are
    kept (as floats). Incomplete or label-less rows are skipped.
    """
    samples = []
    with open(file_path, 'r') as fh:
        for raw in fh:
            fields = raw.strip().split(',')
            # Skip rows that are not "4 features + non-empty label".
            if len(fields) != 5 or not fields[-1]:
                continue
            samples.append([float(v) for v in fields[:4]])
    return samples

def kmeans(X, k, max_iters=100):
    """Cluster the points in X into k groups with Lloyd's algorithm.

    Args:
        X: non-empty list of equal-length numeric feature vectors.
        k: number of clusters; must satisfy 1 <= k <= len(X)
           (random.sample raises ValueError otherwise).
        max_iters: safety cap on iterations. K-means normally converges
            long before this, but the exact-equality convergence test can
            oscillate forever on distance ties, so a cap guarantees
            termination. Default keeps the original call signature working.

    Returns:
        (clusters, centers): clusters is a list of k lists of points,
        centers is the list of k final centroid vectors.
    """
    n_samples = len(X)
    # Choose k distinct samples as the initial cluster centers.
    centers = [X[i] for i in random.sample(range(n_samples), k)]

    clusters = [[] for _ in range(k)]
    for _ in range(max_iters):
        # Assignment step: each point joins its nearest center.
        # min() returns the first index on ties, matching list.index(min(...)).
        clusters = [[] for _ in range(k)]
        for x in X:
            nearest = min(range(k), key=lambda j: math.dist(x, centers[j]))
            clusters[nearest].append(x)

        # Update step: move each center to the componentwise mean of its
        # cluster; reseed an empty cluster with a random sample point.
        new_centers = []
        for cluster in clusters:
            if cluster:
                count = len(cluster)
                new_centers.append([sum(col) / count for col in zip(*cluster)])
            else:
                new_centers.append(X[random.randint(0, n_samples - 1)])

        # Converged when no center moved. Exact comparison is sound here:
        # identical assignments produce identical means.
        if new_centers == centers:
            break
        centers = new_centers

    return clusters, centers

def euclidean_distance(a, b):
    """Return the Euclidean (L2) distance between vectors a and b.

    Components are paired with zip, so extra trailing components of the
    longer vector are ignored.
    """
    total = 0.0
    for p, q in zip(a, b):
        diff = p - q
        total += diff * diff
    return math.sqrt(total)

def mean_vector(vectors):
    """Return the componentwise mean of a non-empty list of vectors."""
    averages = []
    for component in zip(*vectors):
        averages.append(sum(component) / len(component))
    return averages

# Demo entry point: cluster the iris data set and report the result.
if __name__ == "__main__":
    dataset = load_dataset('iris.txt')

    print("Loaded dataset:")
    for row in dataset[:5]:
        print(row)

    clusters, centers = kmeans(dataset, 3)

    # Report the final centroids, then the membership of each cluster.
    print("\nCluster centers:")
    for idx, centroid in enumerate(centers):
        print(f"Cluster {idx}: {centroid}")

    print("\nClustered data:")
    for idx, members in enumerate(clusters):
        print(f"\nCluster {idx}:")
        for point in members:
            print(point)