import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import load_iris
from sklearn.preprocessing import StandardScaler
from sklearn.metrics import normalized_mutual_info_score, adjusted_rand_score
import random


class KMeans:
    """Minimal K-Means clustering (Lloyd's algorithm).

    Parameters
    ----------
    n_clusters : int
        Number of centroids to fit.
    max_iter : int
        Maximum number of assign/update iterations.
    tol : float
        Convergence threshold: stop when the Frobenius norm of the
        centroid movement between iterations falls below this value.
    random_state : int or None
        Optional seed for the random centroid initialization.  ``None``
        preserves the original nondeterministic behavior.
    """

    def __init__(self, n_clusters=3, max_iter=100, tol=1e-4, random_state=None):
        self.n_clusters = n_clusters
        self.max_iter = max_iter
        self.tol = tol
        self.random_state = random_state
        self.centroids = None  # (n_clusters, n_features) after fit()
        self.labels = None     # (n_samples,) cluster index per sample after fit()

    def fit(self, X):
        """Fit centroids to ``X`` of shape (n_samples, n_features).

        Returns ``self``.  Raises ValueError (from ``random.sample``)
        if ``n_clusters`` exceeds the number of samples.
        """
        n_samples, _ = X.shape

        # Initialize centroids from distinct random samples so the
        # initial centroids are guaranteed to be different points.
        rng = random.Random(self.random_state)
        random_indices = rng.sample(range(n_samples), self.n_clusters)
        self.centroids = X[random_indices]

        for _ in range(self.max_iter):
            # Assignment step: each sample joins its nearest centroid.
            distances = self._compute_distances(X)
            self.labels = np.argmin(distances, axis=1)

            # Update step: each centroid becomes the mean of its members.
            # BUG FIX: the original took the mean of an empty slice when a
            # cluster lost all of its points, yielding NaN centroids (and a
            # RuntimeWarning); an empty cluster now keeps its old centroid.
            new_centroids = np.array([
                X[self.labels == j].mean(axis=0)
                if np.any(self.labels == j)
                else self.centroids[j]
                for j in range(self.n_clusters)
            ])

            # Converged once the centroids have essentially stopped moving.
            if np.linalg.norm(new_centroids - self.centroids) < self.tol:
                break

            self.centroids = new_centroids

        return self

    def _compute_distances(self, X):
        """Return the (n_samples, n_clusters) Euclidean distance matrix."""
        distances = np.zeros((X.shape[0], self.n_clusters))
        for i, centroid in enumerate(self.centroids):
            distances[:, i] = np.linalg.norm(X - centroid, axis=1)
        return distances

    def predict(self, X):
        """Assign each row of ``X`` to its nearest fitted centroid.

        Must be called after ``fit`` (otherwise ``self.centroids`` is None).
        """
        distances = self._compute_distances(X)
        return np.argmin(distances, axis=1)


# Evaluation metrics
def calculate_purity(y_true, y_pred):
    """Clustering purity: for every predicted cluster take the count of
    its dominant true class, sum those counts, and divide by n samples."""
    y_true = np.asarray(y_true)
    y_pred = np.asarray(y_pred)
    dominant_total = 0
    for cluster in np.unique(y_pred):
        members = y_true[y_pred == cluster]
        _, counts = np.unique(members, return_counts=True)
        dominant_total += counts.max()
    return dominant_total / y_true.size


def calculate_f_measure(y_true, y_pred):
    """Macro-averaged F1 score: the mean of the per-class F1 scores.

    BUG FIX: the original discarded the F-score already returned by
    ``precision_recall_fscore_support`` and instead took the harmonic mean
    of the *macro-averaged* precision and recall, which is not macro-F1,
    and raised ZeroDivisionError when precision + recall == 0.  This
    computes the standard macro-F1 directly (per-class F1 = 2*TP /
    (2*TP + FP + FN), zero-division treated as 0, matching sklearn).

    NOTE(review): like the original, this assumes the cluster label ids in
    ``y_pred`` are already aligned with the class ids in ``y_true`` —
    confirm, since raw K-Means labels are an arbitrary permutation.
    """
    y_true = np.asarray(y_true)
    y_pred = np.asarray(y_pred)
    classes = np.unique(np.concatenate([y_true, y_pred]))
    f1_scores = []
    for c in classes:
        tp = np.sum((y_true == c) & (y_pred == c))
        fp = np.sum((y_true != c) & (y_pred == c))
        fn = np.sum((y_true == c) & (y_pred != c))
        denom = 2 * tp + fp + fn
        f1_scores.append(0.0 if denom == 0 else 2 * tp / denom)
    return float(np.mean(f1_scores))


def calculate_accuracy(y_true, y_pred):
    """Fraction of positions where the predicted label equals the true label."""
    matches = np.asarray(y_true) == np.asarray(y_pred)
    return float(np.mean(matches))


# Load the Iris dataset: 150 samples, 4 features, 3 classes.
iris = load_iris()
X = iris.data
y = iris.target
feature_names = iris.feature_names

# Standardize features to zero mean / unit variance so that no single
# feature dominates the Euclidean distances used by K-Means.
scaler = StandardScaler()
X_scaled = scaler.fit_transform(X)

# Run the K-Means implementation above on the standardized data.
kmeans = KMeans(n_clusters=3, max_iter=100, tol=1e-4)
kmeans.fit(X_scaled)
y_pred = kmeans.predict(X_scaled)

# Visualize the clustering result (first two raw features only).
# NOTE(review): the titles are Chinese text; matplotlib's default fonts
# usually cannot render CJK glyphs — confirm a suitable font is configured
# (e.g. via plt.rcParams['font.sans-serif']) or the titles will show boxes.
plt.figure(figsize=(12, 5))

# Left panel: ground-truth labels.
plt.subplot(1, 2, 1)
plt.scatter(X[:, 0], X[:, 1], c=y, cmap='viridis', edgecolor='k')
plt.title('真实标签')
plt.xlabel(feature_names[0])
plt.ylabel(feature_names[1])

# Right panel: K-Means predicted labels (cluster ids are arbitrary, so
# colors need not match the left panel even for a perfect clustering).
plt.subplot(1, 2, 2)
plt.scatter(X[:, 0], X[:, 1], c=y_pred, cmap='viridis', edgecolor='k')
plt.title('KMeans聚类结果')
plt.xlabel(feature_names[0])
plt.ylabel(feature_names[1])

plt.tight_layout()
plt.savefig('kmeans_iris_clustering.png')
plt.show()

# Compute the evaluation metrics against the ground-truth labels.
# NOTE(review): F-measure and accuracy compare raw cluster ids to class
# ids without aligning the label permutation, so they can look poor even
# for a good clustering; purity, NMI and ARI are permutation-invariant.
purity = calculate_purity(y, y_pred)
f_measure = calculate_f_measure(y, y_pred)
acc = calculate_accuracy(y, y_pred)
nmi = normalized_mutual_info_score(y, y_pred)
ari = adjusted_rand_score(y, y_pred)

# Print the evaluation results.
print("聚类评估指标:")
print(f"纯度(Purity): {purity:.4f}")
print(f"F-measure: {f_measure:.4f}")
print(f"准确率(Accuracy): {acc:.4f}")
print(f"标准化互信息(NMI): {nmi:.4f}")
print(f"调整兰德指数(ARI): {ari:.4f}")
