# 导入必要的库
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import load_iris
from sklearn.preprocessing import StandardScaler
from sklearn.cluster import KMeans
from sklearn.metrics import silhouette_score, adjusted_rand_score, davies_bouldin_score, calinski_harabasz_score
from sklearn.decomposition import PCA
from mpl_toolkits.mplot3d import Axes3D

# 设置matplotlib支持中文（解决中文显示问题）
plt.rcParams['font.sans-serif'] = ['SimHei']
plt.rcParams['axes.unicode_minus'] = False

# 1. Data preparation: load the iris dataset (features X, ground-truth labels y)
iris = load_iris()
X = iris.data
y = iris.target

# Data cleaning: mean-impute missing values. Defensive only — the bundled
# iris data ships without NaNs, so the else branch is the expected path.
if np.any(np.isnan(X)):
    print("警告：数据包含缺失值，进行均值填充...")
    column_means = np.nanmean(X, axis=0)
    # A column that is entirely NaN yields a NaN mean; fall back to 0 there
    column_means = np.where(np.isnan(column_means), 0, column_means)
    # Broadcasting aligns the (n_features,) mean vector against every row,
    # so the previous np.tile copy was unnecessary
    X = np.where(np.isnan(X), column_means, X)
else:
    print("数据无缺失值，跳过清洗步骤")

# Standardize features to zero mean / unit variance so that K-Means
# distances treat all four measurements on an equal footing.
scaler = StandardScaler()
X_scaled = scaler.fit_transform(X)

# 2. Initialization: the whole dataset starts out as a single cluster.
target_clusters = 3
max_iterations = 100
clusters = [X_scaled]
n_clusters = len(clusters)

# 3-6. Bisecting-clustering main loop (with empty-cluster guard and progress
# output): repeatedly split the highest-variance cluster in two until the
# target cluster count is reached or the iteration budget runs out.
iteration = 0
while n_clusters < target_clusters and iteration < max_iterations:
    # 3. Choose the cluster to split: largest mean per-feature variance
    spread = [np.var(members, axis=0).mean() for members in clusters]
    split_idx = int(np.argmax(spread))
    candidate = clusters[split_idx]
    print(f"迭代 {iteration + 1}: 选择簇 {split_idx}，样本数: {len(candidate)}，方差: {spread[split_idx]:.4f}")

    # 4. Bisect the chosen cluster with K-Means (k=2, fixed seed)
    halves = KMeans(n_clusters=2, random_state=42).fit_predict(candidate)
    part_a, part_b = candidate[halves == 0], candidate[halves == 1]

    # Guard against a degenerate split that leaves one side empty
    if min(len(part_a), len(part_b)) == 0:
        print(f"警告：分割产生空簇，簇1样本数: {len(part_a)}，簇2样本数: {len(part_b)}，跳过分割")
        iteration += 1
        continue

    # 5. Evaluate the split: within-cluster SSE before vs. after
    sse_before = np.sum((candidate - candidate.mean(axis=0)) ** 2)
    sse_after = np.sum((part_a - part_a.mean(axis=0)) ** 2) + np.sum((part_b - part_b.mean(axis=0)) ** 2)
    print(f"  SSE原始: {sse_before:.4f}, SSE分割后: {sse_after:.4f}")

    # Accept the split when it lowers SSE; the very first split is forced
    # so the procedure can get off the ground from a single cluster
    if sse_after < sse_before or n_clusters == 1:
        # 6. Replace the parent cluster with its two halves
        clusters.pop(split_idx)
        clusters.extend((part_a, part_b))
        n_clusters += 1
        print(f"  分割成功，当前簇数: {n_clusters}")
    else:
        print("  SSE未减小，跳过分割")
    iteration += 1

print(f"二阶聚类完成，迭代次数: {iteration}, 最终簇数: {n_clusters}")

# 7. Result analysis & evaluation.
# Recover a per-sample label vector from the cluster partition.
# BUG FIX: the previous code assigned labels to contiguous slices
# (all_labels[start:end] = i), but each cluster holds an arbitrary subset of
# rows — not a contiguous run of X_scaled in original order — so every metric
# below was computed against mislabeled samples. Instead, match each cluster
# row back to its index/indices in X_scaled.
all_labels = np.full(len(X_scaled), -1, dtype=int)
for i, cluster in enumerate(clusters):
    for row in cluster:
        # Cluster rows are exact copies of X_scaled rows, so a tolerant
        # equality match recovers the original positions. Duplicate samples
        # all receive the same label, keeping the partition consistent.
        all_labels[np.all(np.isclose(X_scaled, row), axis=1)] = i

# Sanity check: silhouette (and friends) need at least two distinct labels
unique_labels = np.unique(all_labels)
print(f"唯一标签值: {unique_labels}")
if len(unique_labels) < 2:
    print("错误：只有一个簇，无法计算轮廓系数。")
else:
    # Internal metrics (silhouette, DB, CH) + external metric (ARI vs. y)
    silhouette_avg = silhouette_score(X_scaled, all_labels)
    ari = adjusted_rand_score(y, all_labels)
    db_index = davies_bouldin_score(X_scaled, all_labels)
    ch_score = calinski_harabasz_score(X_scaled, all_labels)

    print(f"轮廓系数 (Silhouette Score): {silhouette_avg:.4f} （[-1,1], 越高越好）")
    print(f"调整后的Rand指数 (Adjusted Rand Score): {ari:.4f} （[-1,1], 越高越好）")
    print(f"Davies-Bouldin Index: {db_index:.4f} （[0,+∞), 越低越好）")
    print(f"Calinski-Harabasz Index: {ch_score:.4f} （[0,+∞), > 100：聚类效果通常较好 越高越好）")

# 8. Visualization: project the standardized data onto 2 and 3 principal
# components for plotting.
pca_2d = PCA(n_components=2)
X_pca_2d = pca_2d.fit_transform(X_scaled)

pca_3d = PCA(n_components=3)
X_pca_3d = pca_3d.fit_transform(X_scaled)


def _scatter_2d(colors, title, bar_label):
    """Draw one 2D PCA scatter plot colored by *colors*, with a colorbar."""
    plt.figure(figsize=(10, 6))
    handle = plt.scatter(X_pca_2d[:, 0], X_pca_2d[:, 1], c=colors, cmap='viridis', edgecolor='k')
    plt.title(title)
    plt.xlabel('第一主成分')
    plt.ylabel('第二主成分')
    plt.colorbar(handle, label=bar_label)
    plt.show()


# Plot 1: 2D scatter of the clustering result
_scatter_2d(all_labels, f'二阶聚类结果 (簇数={n_clusters})', '簇标签')

# Plot 2: 2D scatter of the ground-truth labels for comparison
_scatter_2d(y, '真实标签分布', '真实类别')

# Plot 3: 3D scatter of the clustering result
fig = plt.figure(figsize=(10, 6))
ax = fig.add_subplot(111, projection='3d')
sc3d = ax.scatter(X_pca_3d[:, 0], X_pca_3d[:, 1], X_pca_3d[:, 2], c=all_labels, cmap='viridis', edgecolor='k')
ax.set_title(f'二阶聚类结果3D视图 (簇数={n_clusters})')
ax.set_xlabel('第一主成分')
ax.set_ylabel('第二主成分')
ax.set_zlabel('第三主成分')
plt.colorbar(sc3d, label='簇标签')
plt.show()

# Plot 4: cluster centers projected into the 2D PCA space
centers_2d = pca_2d.transform(np.array([cluster.mean(axis=0) for cluster in clusters]))
plt.figure(figsize=(10, 6))
plt.scatter(X_pca_2d[:, 0], X_pca_2d[:, 1], c=all_labels, cmap='viridis', alpha=0.5, edgecolor='k')
plt.scatter(centers_2d[:, 0], centers_2d[:, 1], c='red', marker='x', s=200, label='簇中心')
plt.title(f'簇中心投影 (簇数={n_clusters})')
plt.xlabel('第一主成分')
plt.ylabel('第二主成分')
plt.legend()
plt.show()

# Plot 5: silhouette-score curve — rerun the bisecting procedure for several
# target cluster counts k and record the silhouette score of each run so the
# best k can be read off the chart.
silhouette_scores = []
k_values = range(2, 6)
for k in k_values:
    clusters_k = [X_scaled]
    n_clusters_k = 1
    iteration = 0
    while n_clusters_k < k and iteration < max_iterations:
        # Split the cluster with the largest mean per-feature variance
        variances = [np.var(cluster, axis=0).mean() for cluster in clusters_k]
        max_variance_idx = np.argmax(variances)
        cluster_to_split = clusters_k[max_variance_idx]
        kmeans = KMeans(n_clusters=2, random_state=42)
        labels = kmeans.fit_predict(cluster_to_split)
        cluster1 = cluster_to_split[labels == 0]
        cluster2 = cluster_to_split[labels == 1]
        if len(cluster1) == 0 or len(cluster2) == 0:
            iteration += 1
            continue
        # Accept the split when SSE drops; force the very first split
        sse_original = np.sum((cluster_to_split - np.mean(cluster_to_split, axis=0)) ** 2)
        sse_new = np.sum((cluster1 - np.mean(cluster1, axis=0)) ** 2) + np.sum((cluster2 - np.mean(cluster2, axis=0)) ** 2)
        if sse_new < sse_original or n_clusters_k == 1:
            clusters_k.pop(max_variance_idx)
            clusters_k.append(cluster1)
            clusters_k.append(cluster2)
            n_clusters_k += 1
        iteration += 1
    # BUG FIX: labels must be mapped back to the original sample order by
    # matching rows — each cluster is an arbitrary subset of X_scaled, so the
    # old contiguous-slice assignment mislabeled samples and corrupted every
    # silhouette score on this curve.
    all_labels_k = np.full(len(X_scaled), -1, dtype=int)
    for i, cluster in enumerate(clusters_k):
        for row in cluster:
            all_labels_k[np.all(np.isclose(X_scaled, row), axis=1)] = i
    if len(np.unique(all_labels_k)) < 2:
        silhouette_scores.append(-1)  # sentinel: silhouette undefined for one cluster
    else:
        silhouette_scores.append(silhouette_score(X_scaled, all_labels_k))

plt.figure(figsize=(10, 6))
plt.plot(k_values, silhouette_scores, marker='o')
plt.title('不同簇数下的轮廓系数曲线')
plt.xlabel('簇数 (k)')
plt.ylabel('轮廓系数')
plt.grid(True)
plt.show()

# Report how many samples landed in each cluster
print("\n每个簇的样本数量:")
for label in range(n_clusters):
    count = np.sum(all_labels == label)
    print(f"簇 {label} 包含 {count} 个样本")

# Mean feature vector of each cluster (in standardized space).
# The main loop keeps n_clusters == len(clusters), so iterating the list
# directly is equivalent to indexing by range(n_clusters).
cluster_centers = [cluster.mean(axis=0) for cluster in clusters]
print("\n每个簇的平均特征（标准化后）:")
for i, center in enumerate(cluster_centers):
    print(f"簇 {i} 平均特征: {center}")