import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import load_iris
from sklearn.decomposition import PCA
from sklearn.metrics import normalized_mutual_info_score, rand_score, adjusted_rand_score
from sklearn.preprocessing import StandardScaler
import random

# Seed both NumPy's and the stdlib's RNGs so every run is reproducible
np.random.seed(42)
random.seed(42)


class KMeans:
    """K-means clustering (Lloyd's algorithm) based on Euclidean distance.

    Attributes set by fit():
        centers: ndarray of shape (n_clusters, n_features) — final centroids.
        labels: ndarray of shape (n_samples,) — cluster index per sample.
        inertia: float — sum of squared distances of samples to their centers.
        iterations: int — number of iterations actually performed.
    """

    def __init__(self, n_clusters=3, max_iter=100, tol=1e-4):
        """Initialize the model.

        Args:
            n_clusters: number of clusters K.
            max_iter: maximum number of iterations.
            tol: threshold on total centroid movement for early stopping.
        """
        self.n_clusters = n_clusters
        self.max_iter = max_iter
        self.tol = tol
        self.centers = None
        self.labels = None
        self.inertia = None
        self.iterations = 0

    def euclidean_distance(self, x1, x2):
        """Euclidean distance between a 1-D point and each row of a 2-D array.

        Kept for backward compatibility; relies on broadcasting (x1 - x2).
        """
        return np.sqrt(np.sum((x1 - x2) ** 2, axis=1))

    def initialize_centers(self, X):
        """Pick n_clusters distinct samples at random as the initial centers."""
        indices = np.random.choice(X.shape[0], self.n_clusters, replace=False)
        return X[indices].copy()

    def assign_clusters(self, X, centers):
        """Assign every sample to the index of its nearest center.

        Vectorized: builds the full (n_samples, n_clusters) squared-distance
        matrix at once instead of looping over samples in Python. Squared
        distances suffice for argmin because sqrt is monotonic.
        """
        sq_dists = np.sum((X[:, None, :] - centers[None, :, :]) ** 2, axis=2)
        return np.argmin(sq_dists, axis=1)

    def update_centers(self, X, labels):
        """Return new centers as the mean of each cluster's members.

        An empty cluster leaves a zero row here; fit() restores the previous
        center for such clusters so a centroid never jumps to the origin.
        """
        centers = np.zeros((self.n_clusters, X.shape[1]))
        for k in range(self.n_clusters):
            members = X[labels == k]
            if len(members) > 0:
                centers[k] = members.mean(axis=0)
        return centers

    def fit(self, X):
        """Run Lloyd's algorithm on X until convergence or max_iter.

        Args:
            X: ndarray of shape (n_samples, n_features).

        Returns:
            self, with centers / labels / inertia / iterations populated.
        """
        self.centers = self.initialize_centers(X)

        for i in range(self.max_iter):
            # Assignment step: nearest center per sample.
            self.labels = self.assign_clusters(X, self.centers)

            # Update step: recompute centroids.
            old_centers = self.centers.copy()
            new_centers = self.update_centers(X, self.labels)

            # Bug fix: an empty cluster must keep its previous center rather
            # than collapse to the zero vector produced by update_centers.
            counts = np.bincount(self.labels, minlength=self.n_clusters)
            empty = counts == 0
            new_centers[empty] = old_centers[empty]
            self.centers = new_centers

            # Total centroid movement (Frobenius norm of the shift).
            center_shift = np.sqrt(np.sum((self.centers - old_centers) ** 2))

            # Inertia: sum of squared distances to each sample's own center,
            # computed in one vectorized expression.
            self.inertia = float(np.sum((X - self.centers[self.labels]) ** 2))

            self.iterations = i + 1

            # Early stop once the centers have essentially converged.
            if center_shift < self.tol:
                print(f"提前停止，迭代次数: {i + 1}, 中心变化: {center_shift:.6f} < {self.tol}")
                break

        return self

    def predict(self, X):
        """Assign each sample of X to the nearest learned center.

        Raises:
            ValueError: if fit() has not been called yet.
        """
        if self.centers is None:
            raise ValueError("模型尚未训练，请先调用fit方法")
        return self.assign_clusters(X, self.centers)


def get_matching_dict(true_labels, pred_labels):
    """Map each predicted cluster id to the majority true label among its members."""
    mapping = {}
    for cluster_id in np.unique(pred_labels):
        members = true_labels[pred_labels == cluster_id]
        # Defensive guard: np.unique guarantees at least one member per id.
        if members.size == 0:
            continue
        # Majority vote: the most frequent true label inside this cluster.
        mapping[cluster_id] = np.bincount(members).argmax()
    return mapping


def calculate_f_measure(true_labels, pred_labels):
    """Weighted-average F-measure after majority-vote cluster/label matching.

    Predicted cluster ids are first remapped to true labels via
    get_matching_dict, then per-class F1 scores are averaged weighted by
    each class's share of the samples.
    """
    matching = get_matching_dict(true_labels, pred_labels)

    # Remap every prediction to its matched true label (identity fallback).
    remapped = np.array([matching.get(p, p) for p in pred_labels])

    # Confusion matrix over the true label set.
    classes = np.unique(true_labels)
    k = len(classes)
    cm = np.zeros((k, k))
    for row, actual in enumerate(classes):
        row_mask = true_labels == actual
        for col, predicted in enumerate(classes):
            cm[row, col] = np.sum(row_mask & (remapped == predicted))

    # Per-class F1 from the matrix's rows (recall) and columns (precision).
    scores = []
    for idx in range(k):
        col_sum = np.sum(cm[:, idx])
        row_sum = np.sum(cm[idx, :])
        precision = cm[idx, idx] / col_sum if col_sum > 0 else 0
        recall = cm[idx, idx] / row_sum if row_sum > 0 else 0
        scores.append(2 * precision * recall / (precision + recall)
                      if (precision + recall) > 0 else 0)

    # Weight each class by its fraction of all samples.
    return np.average(scores, weights=np.sum(cm, axis=1) / np.sum(cm))


def calculate_acc(true_labels, pred_labels):
    """Clustering accuracy: fraction of samples whose matched prediction equals the true label."""
    matching = get_matching_dict(true_labels, pred_labels)
    remapped = np.array([matching.get(p, p) for p in pred_labels])
    return np.mean(remapped == true_labels)


def plot_clustering_results(X, true_labels, pred_labels, title):
    """Plot ground-truth vs. predicted cluster assignments side by side.

    The data is projected onto its first two principal components so that
    arbitrary-dimensional input can be shown in a 2-D scatter plot.
    """
    projected = PCA(n_components=2).fit_transform(X)

    plt.figure(figsize=(15, 5))

    # Left panel: ground-truth labels.
    plt.subplot(1, 2, 1)
    sc = plt.scatter(projected[:, 0], projected[:, 1],
                     c=true_labels, cmap='viridis', s=50, alpha=0.8)
    plt.title('真实标签分布')
    plt.xlabel('主成分1')
    plt.ylabel('主成分2')
    plt.colorbar(sc)

    # Right panel: predicted labels under the caller-supplied title.
    plt.subplot(1, 2, 2)
    sc = plt.scatter(projected[:, 0], projected[:, 1],
                     c=pred_labels, cmap='viridis', s=50, alpha=0.8)
    plt.title(title)
    plt.xlabel('主成分1')
    plt.ylabel('主成分2')
    plt.colorbar(sc)

    plt.tight_layout()
    plt.show()


def main():
    """Cluster the standardized Iris data with KMeans, report metrics, and plot."""
    # Load the Iris dataset.
    iris = load_iris()
    X, y = iris.data, iris.target
    feature_names = iris.feature_names

    # Standardize features to zero mean and unit variance.
    X_scaled = StandardScaler().fit_transform(X)

    # KMeans hyper-parameters.
    n_clusters, max_iter, tol = 3, 100, 1e-4

    # Build and fit the model.
    kmeans = KMeans(n_clusters=n_clusters, max_iter=max_iter, tol=tol)
    kmeans.fit(X_scaled)
    pred_labels = kmeans.labels

    # External clustering-quality metrics against the true species labels.
    f_measure = calculate_f_measure(y, pred_labels)
    acc = calculate_acc(y, pred_labels)
    nmi = normalized_mutual_info_score(y, pred_labels)
    ri = rand_score(y, pred_labels)
    ari = adjusted_rand_score(y, pred_labels)

    # Report the results.
    print(f"聚类结果 - K={n_clusters}, 迭代次数={kmeans.iterations}")
    print(f"惯性(Inertia): {kmeans.inertia:.4f}")
    print(f"F-measure: {f_measure:.4f}")
    print(f"ACC: {acc:.4f}")
    print(f"NMI: {nmi:.4f}")
    print(f"RI: {ri:.4f}")
    print(f"ARI: {ari:.4f}")

    # Visualize the clustering outcome.
    plot_clustering_results(X_scaled, y, pred_labels,
                            f'KMeans聚类结果 (K={n_clusters}, 迭代={kmeans.iterations})')


# Run the demo only when executed as a script, not on import.
if __name__ == "__main__":
    main()