
from sklearn.datasets import load_iris
import numpy as np
from sklearn.metrics import jaccard_score, calinski_harabasz_score

# Load the Iris dataset.
iris = load_iris()
data = iris.data
true_labels = iris.target
n = len(data)
k = 3

# Per sample: columns 0..k-1 hold the distance to each center,
# column k holds the index of the nearest center.
dist = np.zeros([n, k + 1])
# Deterministic seeding: the first k samples become the initial centers.
center = data[:k, :]
center_new = np.zeros([k, data.shape[1]])

while True:
    # Assignment step: Euclidean distance to every center, pick the nearest.
    for i in range(n):
        for j in range(k):
            dist[i, j] = np.sqrt(np.sum((data[i, :] - center[j, :])**2))
        dist[i, k] = np.argmin(dist[i, :k])

    # Update step: move each center to the mean of its assigned samples.
    # NOTE(review): an empty cluster would make mean() produce NaN; with
    # this dataset and seeding that does not occur — confirm if reused.
    for i in range(k):
        index = dist[:, k] == i
        center_new[i, :] = data[index, :].mean(axis=0)

    # Convergence: stop once the centers no longer move.
    if np.all(center == center_new):
        break
    # BUG FIX: must copy, not alias. With `center = center_new` both names
    # point at the same buffer, so the in-place writes above also mutate
    # `center`, making the comparison trivially True on the next pass and
    # terminating the loop prematurely regardless of convergence.
    center = center_new.copy()

# Final cluster id per sample; cast to int for the metric functions below.
labels = dist[:, k].astype(int)
print(labels)

# Internal validity: higher Calinski-Harabasz means denser, better
# separated clusters (needs no ground truth).
cp = calinski_harabasz_score(data, labels)
print(f"Calinski-Harabasz Index (CP): {cp:.2f}")

# External validity against the true species labels.
# NOTE(review): cluster ids are an arbitrary permutation of the true label
# ids, so this comparison only makes sense if the numbering happens to
# match; a permutation-invariant metric (e.g. adjusted Rand index) would
# be more robust — confirm intent.
# BUG FIX: the original source had backslash-escaped quotes around
# weighted, which is a syntax error outside a string literal.
jaccard = jaccard_score(true_labels, labels, average='weighted')
print(f"Jaccard Coefficient: {jaccard:.2f}")

# scikit-learn implementation
from sklearn.cluster import KMeans
import matplotlib.pyplot as plt
from sklearn.decomposition import PCA

# Reload the dataset so this section is independent of the code above.
iris = load_iris()
data = iris.data

# Fixed random_state plus explicit n_init makes the fit reproducible.
model = KMeans(n_clusters=3, random_state=0, n_init=10)
model.fit(data)
labels = model.labels_

print("Cluster labels:", labels)

# Project the 4-D feature space down to 2-D for visualization only;
# clustering was done in the full feature space.
pca = PCA(n_components=2)
data_reduced = pca.fit_transform(data)

# BUG FIX: the string literals below originally used backslash-escaped
# quotes, which is a syntax error in Python source; restored plain quotes.
plt.scatter(data_reduced[:, 0], data_reduced[:, 1], c=labels,
            cmap='viridis', marker='o', edgecolor='k', s=50)
plt.title('K-means Clustering on Iris Dataset')
plt.xlabel('Principal Component 1')
plt.ylabel('Principal Component 2')
plt.colorbar(label='Cluster Label')
plt.show()


