import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.datasets import load_iris
import warnings

# 解决字体显示问题
try:
    plt.rcParams['font.sans-serif'] = ['SimHei', 'Microsoft YaHei', 'DejaVu Sans']
    plt.rcParams['axes.unicode_minus'] = False
except:
    pass

# 忽略字体相关的警告
warnings.filterwarnings("ignore", category=UserWarning)


class KMeans:
    """Plain-NumPy implementation of Lloyd's k-means clustering algorithm."""

    def __init__(self, n_clusters=3, max_iter=100, tol=1e-4, random_state=42):
        self.n_clusters = n_clusters      # number of clusters k
        self.max_iter = max_iter          # maximum number of iterations
        self.tol = tol                    # convergence threshold on total center movement
        self.random_state = random_state  # RNG seed for reproducible initialization
        self.centers = None               # learned cluster centers, shape (k, n_features)
        self.labels = None                # cluster index of each training sample

    def euclidean_distance(self, x1, x2):
        """Return the Euclidean distance ||x1 - x2|| along the last axis."""
        return np.sqrt(np.sum((x1 - x2) ** 2, axis=-1))

    def _distances_to_centers(self, X):
        """Return the (n_samples, k) matrix of distances from each row of X
        to each current cluster center.

        Shared by fit() and predict() so the distance computation lives in
        exactly one place.
        """
        distances = np.zeros((X.shape[0], self.n_clusters))
        for i in range(self.n_clusters):
            distances[:, i] = self.euclidean_distance(X, self.centers[i])
        return distances

    def fit(self, X):
        """Fit the model on X, alternating assignment and center-update steps.

        Parameters
        ----------
        X : ndarray of shape (n_samples, n_features)
            Training data.

        Sets ``self.centers`` and ``self.labels``. Labels are always
        recomputed against the final centers, so they can never be stale
        (the previous implementation kept the labels from the iteration
        *before* convergence was detected).
        """
        np.random.seed(self.random_state)
        n_samples, n_features = X.shape

        # 1. Initialize centers by drawing k distinct samples at random.
        random_idx = np.random.choice(n_samples, self.n_clusters, replace=False)
        self.centers = X[random_idx]

        # 2. Iterate: assign samples, then move centers to cluster means.
        for iteration in range(self.max_iter):
            # Assignment step: each sample joins its nearest center's cluster.
            new_labels = np.argmin(self._distances_to_centers(X), axis=1)

            # Update step: each center moves to the mean of its members.
            new_centers = np.zeros((self.n_clusters, n_features))
            for i in range(self.n_clusters):
                cluster_samples = X[new_labels == i]
                if len(cluster_samples) > 0:
                    new_centers[i] = cluster_samples.mean(axis=0)
                else:
                    # Empty cluster: re-seed its center from a random sample.
                    new_centers[i] = X[np.random.choice(n_samples)]

            # Convergence check: stop once the centers barely move.
            center_diff = np.linalg.norm(new_centers - self.centers)
            if center_diff < self.tol:
                print(f"收敛于第 {iteration + 1} 次迭代，中心变化: {center_diff:.6f}")
                break

            self.centers = new_centers

        # Final assignment so labels are consistent with the final centers.
        self.labels = np.argmin(self._distances_to_centers(X), axis=1)

    def predict(self, X):
        """Return the index of the nearest learned center for each row of X."""
        return np.argmin(self._distances_to_centers(X), axis=1)

    def calculate_sse(self, X):
        """Return the within-cluster sum of squared errors (SSE) for the
        training assignments stored in ``self.labels``."""
        sse = 0.0
        for i in range(self.n_clusters):
            cluster_samples = X[self.labels == i]
            if len(cluster_samples) == 0:
                continue
            sse += np.sum((cluster_samples - self.centers[i]) ** 2)
        return sse

    def plot_clusters(self, X, feature_names, title="K-means Clustering Result"):
        """Scatter-plot the first two feature columns colored by cluster,
        with the cluster centers marked as red crosses. Returns the
        ``matplotlib.pyplot`` module so callers can save/show the figure."""
        plt.figure(figsize=(10, 7))
        colors = ['#1f77b4', '#ff7f0e', '#2ca02c', '#d62728', '#9467bd']

        # Draw each cluster's samples in its own color.
        for i in range(self.n_clusters):
            cluster_points = X[self.labels == i]
            if len(cluster_points) > 0:
                plt.scatter(cluster_points[:, 0], cluster_points[:, 1],
                            c=colors[i % len(colors)], label=f'Cluster {i + 1}',
                            alpha=0.7, s=50, edgecolors='white', linewidth=0.5)

        # Draw the centers as large red crosses. NOTE: 'x' is an unfilled
        # marker, so `edgecolors` does not apply (matplotlib warned and
        # ignored it in the previous version) — it has been removed.
        plt.scatter(self.centers[:, 0], self.centers[:, 1],
                    marker='x', s=200, linewidths=3, color='red',
                    label='Cluster Centers', zorder=10)

        plt.xlabel(feature_names[0], fontsize=12)
        plt.ylabel(feature_names[1], fontsize=12)
        plt.title(title, fontsize=14)
        plt.legend()
        plt.grid(True, alpha=0.3)
        plt.tight_layout()
        return plt


# Demo / smoke test: cluster the Iris dataset and report quality metrics.
if __name__ == "__main__":
    from scipy.stats import mode

    # Load Iris, keeping only the first two features (sepal length and
    # sepal width) so the result can be plotted in 2-D.
    iris = load_iris()
    X = iris.data[:, :2]
    y_true = iris.target  # ground-truth classes, used for comparison only
    feature_names = iris.feature_names[:2]

    print("数据集信息:")
    print(f"样本数量: {X.shape[0]}, 特征数量: {X.shape[1]}")
    print(f"特征名称: {feature_names}")
    print(f"真实类别分布: {np.bincount(y_true)}")
    print("-" * 50)

    # Fit k-means with k=3, matching the number of true Iris species.
    model = KMeans(n_clusters=3, max_iter=100, tol=1e-4, random_state=42)
    model.fit(X)
    y_pred = model.labels
    centers = model.centers
    sse = model.calculate_sse(X)

    # Report the clustering outcome.
    print("聚类结果:")
    print(f"预测簇标签分布: {np.bincount(y_pred)}")
    print(f"簇内误差平方和 (SSE): {sse:.4f}")
    print("最终聚类中心:")
    for cluster_no, (cx, cy) in enumerate(centers, start=1):
        print(f"  簇 {cluster_no}: [{cx:.4f}, {cy:.4f}]")

    # Align predicted cluster ids with true labels (each cluster maps to
    # the majority true class among its members), then score accuracy.
    aligned = np.zeros_like(y_pred)
    for cluster_id in range(3):
        members = y_pred == cluster_id
        if np.sum(members) > 0:
            aligned[members] = mode(y_true[members])[0]
    accuracy = np.mean(aligned == y_true)
    print(f"聚类准确率 (与真实标签对齐后): {accuracy:.4f}")

    # Visualize, save to disk, and display the clustering result.
    plotter = model.plot_clusters(
        X, feature_names,
        'K-means Clustering on Iris Dataset (First 2 Features)')
    plotter.savefig('kmeans_iris_result.png', dpi=300, bbox_inches='tight')
    plotter.show()