import numpy as np
import pandas as pd
from sklearn.datasets import load_iris
from sklearn.preprocessing import StandardScaler
from sklearn.metrics import normalized_mutual_info_score, adjusted_rand_score
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.decomposition import PCA
import re

# Configure matplotlib so CJK axis/legend text renders correctly.
plt.rcParams.update({
    "font.family": ["SimHei", "WenQuanYi Micro Hei", "Heiti TC"],
    "axes.unicode_minus": False,  # keep minus signs visible with CJK fonts
})

# Load the Iris dataset and unpack the pieces used throughout the script.
iris = load_iris()
X = iris.data
y_true = iris.target
feature_names = iris.feature_names
target_names = iris.target_names

# Standardize features to zero mean / unit variance before clustering.
scaler = StandardScaler()
X_scaled = scaler.fit_transform(X)


# 自定义KMeans实现
class MyKMeans:
    """Plain Lloyd's-algorithm KMeans with random-sample initialization.

    Parameters
    ----------
    n_clusters : int
        Number of clusters to form.
    max_iter : int
        Maximum number of assignment/update iterations.
    tol : float
        Convergence threshold on the Frobenius norm of the centroid shift.
    random_state : int or None
        Seed for centroid initialization; None gives a nondeterministic init.

    Attributes
    ----------
    centroids : ndarray of shape (n_clusters, n_features), set by fit()
    labels_ : ndarray of shape (n_samples,), cluster index per sample
    inertia_ : float, sum of squared distances to assigned centroids
    n_iter_ : int, iterations actually run
    """

    def __init__(self, n_clusters=3, max_iter=100, tol=1e-4, random_state=None):
        self.n_clusters = n_clusters
        self.max_iter = max_iter
        self.tol = tol
        self.random_state = random_state
        self.centroids = None
        self.labels_ = None
        self.inertia_ = None
        self.n_iter_ = 0

    def fit(self, X):
        """Cluster X and return self (sklearn-style fluent API)."""
        X = np.asarray(X, dtype=float)  # accept lists / int arrays
        n_samples = X.shape[0]

        # Use a private RandomState instead of np.random.seed() so fitting
        # does not clobber the caller's global RNG state; for a fixed seed
        # this draws the same indices the seeded global generator would.
        rng = np.random.RandomState(self.random_state)

        # Initialize centroids as n_clusters distinct random samples.
        # astype(float) both copies and guards against integer input,
        # where assigning a mean into an int array would truncate.
        indices = rng.choice(n_samples, self.n_clusters, replace=False)
        self.centroids = X[indices].astype(float)

        for i in range(self.max_iter):
            old_centroids = self.centroids.copy()

            # Assign every sample to its nearest centroid.
            self.labels_ = self._assign_labels(X)

            # Move each centroid to the mean of its members; an empty
            # cluster keeps its previous position.
            for j in range(self.n_clusters):
                cluster_points = X[self.labels_ == j]
                if len(cluster_points) > 0:
                    self.centroids[j] = cluster_points.mean(axis=0)

            self.n_iter_ = i + 1

            # Early stop once the centroids have essentially stopped moving.
            if np.linalg.norm(self.centroids - old_centroids) < self.tol:
                break

        # Total within-cluster sum of squared distances.
        self.inertia_ = self._compute_inertia(X)
        return self

    def _assign_labels(self, X):
        """Return the index of the nearest centroid for every row of X."""
        # (n_samples, n_clusters) distance matrix via broadcasting; argmin
        # breaks ties toward the lower cluster index, exactly like the
        # per-sample loop it replaces.
        distances = np.linalg.norm(X[:, None, :] - self.centroids[None, :, :], axis=2)
        return np.argmin(distances, axis=1)

    def _compute_inertia(self, X):
        """Sum of squared distances from each sample to its assigned centroid."""
        return float(np.sum((X - self.centroids[self.labels_]) ** 2))

    def predict(self, X):
        """Return cluster labels for new samples (model must be fitted)."""
        # Same nearest-centroid rule as fit(); previously this duplicated
        # the _assign_labels loop verbatim.
        return self._assign_labels(np.asarray(X, dtype=float))


# 计算聚类评估指标
# Compute clustering evaluation metrics against ground-truth labels.
def calculate_clustering_metrics(y_true, y_pred):
    """Evaluate a clustering against ground-truth class labels.

    Parameters
    ----------
    y_true : array-like of shape (n_samples,), true class labels
    y_pred : array-like of shape (n_samples,), predicted cluster labels

    Returns
    -------
    dict with keys 'ACC', 'NMI', 'RI', 'ARI', 'F_measure', and
    'confusion_matrix' (classes x clusters contingency table).
    """
    y_true = np.asarray(y_true)
    y_pred = np.asarray(y_pred)

    # 1. Contingency (confusion) matrix.  Mapping labels through np.unique
    # makes this robust to non-contiguous label values (the original
    # version assumed labels were exactly 0..k-1).
    classes, true_idx = np.unique(y_true, return_inverse=True)
    clusters, pred_idx = np.unique(y_pred, return_inverse=True)
    n_classes = len(classes)
    n_clusters = len(clusters)
    cm = np.zeros((n_classes, n_clusters))
    np.add.at(cm, (true_idx, pred_idx), 1)

    # 2. ACC: cluster labels are arbitrary, so find the cluster->class
    # assignment that maximizes agreement (Hungarian algorithm).
    from scipy.optimize import linear_sum_assignment

    # Negated because linear_sum_assignment solves a minimization problem.
    row_ind, col_ind = linear_sum_assignment(-cm)
    acc = cm[row_ind, col_ind].sum() / len(y_true)

    # 3. NMI (Normalized Mutual Information)
    nmi = normalized_mutual_info_score(y_true, y_pred)

    # 4. RI (Rand Index) in closed form from the contingency table,
    # replacing the original O(n^2) loop over all sample pairs.
    n = len(y_true)
    n_pairs = n * (n - 1) // 2
    same_both = np.sum(cm * (cm - 1)) / 2      # pairs together in class AND cluster
    row_sums = cm.sum(axis=1)
    col_sums = cm.sum(axis=0)
    same_class = np.sum(row_sums * (row_sums - 1)) / 2
    same_cluster = np.sum(col_sums * (col_sums - 1)) / 2
    # Inclusion-exclusion gives the pairs separated in both partitions.
    diff_both = n_pairs - same_class - same_cluster + same_both
    ri = (same_both + diff_both) / n_pairs

    # 5. ARI (Adjusted Rand Index)
    ari = adjusted_rand_score(y_true, y_pred)

    # 6. F-measure: map each class to its majority cluster, then
    #    precision = class members in that cluster / cluster size,
    #    recall    = class members in that cluster / class size.
    precision = np.zeros(n_classes)
    recall = np.zeros(n_classes)
    f1 = np.zeros(n_classes)

    for i in range(n_classes):
        class_in_clusters = cm[i, :]
        best = np.argmax(class_in_clusters)  # majority cluster for class i
        hits = class_in_clusters[best]

        precision[i] = hits / np.sum(cm[:, best])
        recall[i] = hits / np.sum(class_in_clusters)

        # Guard against 0/0 when the class overlaps nothing.
        if precision[i] + recall[i] > 0:
            f1[i] = 2 * precision[i] * recall[i] / (precision[i] + recall[i])

    # Macro-averaged F1 across classes.
    macro_f1 = np.mean(f1)

    return {
        'ACC': acc,
        'NMI': nmi,
        'RI': ri,
        'ARI': ari,
        'F_measure': macro_f1,
        'confusion_matrix': cm
    }


# 可视化聚类结果
def visualize_clustering(X, y_true, y_pred, centroids=None, title="聚类结果可视化"):
    # 使用PCA降维以便可视化
    pca = PCA(n_components=2)
    X_pca = pca.fit_transform(X)

    # 创建图形
    plt.figure(figsize=(15, 6))

    # 真实标签
    plt.subplot(1, 2, 1)
    scatter = plt.scatter(X_pca[:, 0], X_pca[:, 1], c=y_true, cmap='viridis', s=80, alpha=0.8)
    plt.title("真实类别", fontsize=14)
    plt.xlabel(f"主成分1 ({pca.explained_variance_ratio_[0]:.2%})", fontsize=12)
    plt.ylabel(f"主成分2 ({pca.explained_variance_ratio_[1]:.2%})", fontsize=12)

    # 添加图例
    legend = plt.legend(*scatter.legend_elements(), title="类别", fontsize=10)

    # 修复图例文本
    for label in legend.get_texts():
        # 从文本中提取数字
        text = label.get_text()
        # 使用正则表达式提取数字
        match = re.search(r'\d+', text)
        if match:
            num = int(match.group())
            label.set_text(target_names[num])
    plt.gca().add_artist(legend)

    # 聚类结果
    plt.subplot(1, 2, 2)
    scatter = plt.scatter(X_pca[:, 0], X_pca[:, 1], c=y_pred, cmap='viridis', s=80, alpha=0.8)

    # 绘制中心点
    if centroids is not None:
        centroids_pca = pca.transform(centroids)
        plt.scatter(centroids_pca[:, 0], centroids_pca[:, 1], c='red', marker='X', s=200, label='聚类中心')

    plt.title("KMeans聚类结果", fontsize=14)
    plt.xlabel(f"主成分1 ({pca.explained_variance_ratio_[0]:.2%})", fontsize=12)
    plt.ylabel(f"主成分2 ({pca.explained_variance_ratio_[1]:.2%})", fontsize=12)

    # 添加图例
    legend = plt.legend(*scatter.legend_elements(), title="聚类", fontsize=10)

    # 修复图例文本
    for label in legend.get_texts():
        # 从文本中提取数字
        text = label.get_text()
        # 使用正则表达式提取数字
        match = re.search(r'\d+', text)
        if match:
            num = int(match.group())
            label.set_text(f"聚类 {num}")

    if centroids is not None:
        plt.legend(loc='upper right')

    plt.tight_layout()
    plt.suptitle(title, fontsize=16, y=1.02)
    plt.show()


# 主程序
# Main driver: fit KMeans for each K, score it, and visualize the result.
def run_kmeans_analysis(X, y_true, k_values=None, max_iter=100, tol=1e-4):
    """Fit MyKMeans for each K in k_values, report metrics, and plot.

    Parameters
    ----------
    X : array-like of shape (n_samples, n_features), data to cluster
    y_true : ground-truth labels, used only for evaluation
    k_values : list of int or None
        Cluster counts to try; defaults to [3].  A None sentinel replaces
        the previous mutable default list, which is shared across calls.
    max_iter, tol : passed through to MyKMeans.

    Returns
    -------
    list of dicts with keys 'k', 'kmeans' (fitted model), and 'metrics'.
    """
    if k_values is None:
        k_values = [3]

    results = []

    for k in k_values:
        print(f"\n执行KMeans聚类，K={k}...")

        # Fixed seed so repeated runs are reproducible.
        kmeans = MyKMeans(n_clusters=k, max_iter=max_iter, tol=tol, random_state=42)
        kmeans.fit(X)

        # Score the clustering against the true labels.
        metrics = calculate_clustering_metrics(y_true, kmeans.labels_)

        print(f"迭代次数: {kmeans.n_iter_}")
        print(f"惯性(inertia): {kmeans.inertia_:.4f}")
        print(f"聚类评估指标:")
        for metric, value in metrics.items():
            if metric != 'confusion_matrix':
                print(f"  {metric}: {value:.4f}")

        # Show true classes vs. predicted clusters side by side.
        visualize_clustering(
            X, y_true, kmeans.labels_, kmeans.centroids,
            title=f"Iris数据集KMeans聚类结果 (K={k})"
        )

        results.append({
            'k': k,
            'kmeans': kmeans,
            'metrics': metrics
        })

    return results


# 执行KMeans分析
print("加载Iris数据集...")
print(f"数据集形状: {X.shape}")
print(f"特征: {feature_names}")
print(f"类别: {target_names}")

# 可视化原始数据
print("\n可视化原始数据...")
visualize_clustering(X, y_true, y_true, title="Iris数据集原始分布")

# 运行KMeans分析
print("\n开始KMeans聚类分析...")
results = run_kmeans_analysis(X_scaled, y_true, k_values=[3])

# 评估不同K值的性能
print("\n评估不同K值的性能...")
k_range = range(1, 11)
inertia_values = []

for k in k_range:
    kmeans = MyKMeans(n_clusters=k, random_state=42)
    kmeans.fit(X_scaled)
    inertia_values.append(kmeans.inertia_)

# 绘制肘部法则图
plt.figure(figsize=(10, 6))
plt.plot(k_range, inertia_values, marker='o', linestyle='-', linewidth=2)
plt.title('Iris数据集KMeans聚类的肘部法则图', fontsize=14)
plt.xlabel('K值 (聚类数量)', fontsize=12)
plt.ylabel('惯性 (Inertia)', fontsize=12)
plt.grid(True, linestyle='--', alpha=0.7)
plt.xticks(k_range)
plt.tight_layout()
plt.show()
