import numpy as np
import matplotlib.pyplot as plt
from sklearn import datasets
from sklearn.metrics import normalized_mutual_info_score, adjusted_rand_score
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import train_test_split
import pandas as pd
from collections import Counter
import matplotlib.cm as cm
# Use the SimHei font so the Chinese characters in plot titles render correctly.
plt.rcParams["font.family"] = ["SimHei"]
class KMeans:
    """K-Means clustering (Lloyd's algorithm) with random sample initialization.

    Parameters:
        n_clusters: number of clusters to form.
        max_iter: maximum number of assignment/update iterations.
        tol: total centroid movement below which fitting stops early.
        random_state: optional int seed for reproducible initialization;
            when None the global NumPy RNG is used (legacy behavior).

    Attributes set by fit():
        centroids: array of shape (n_clusters, n_features).
        labels: cluster index assigned to each training sample.
        inertia: within-cluster sum of squared distances.
    """
    def __init__(self, n_clusters=3, max_iter=300, tol=1e-4, random_state=None):
        self.n_clusters = n_clusters
        self.max_iter = max_iter
        self.tol = tol
        self.random_state = random_state
        self.centroids = None
        self.labels = None
        self.inertia = None

    def fit(self, X):
        """Fit the model to X of shape (n_samples, n_features); returns self."""
        n_samples, n_features = X.shape
        # Initialize centroids with distinct random samples.
        if self.random_state is None:
            idx = np.random.choice(n_samples, self.n_clusters, replace=False)
        else:
            rng = np.random.RandomState(self.random_state)
            idx = rng.choice(n_samples, self.n_clusters, replace=False)
        self.centroids = X[idx]

        for iteration in range(self.max_iter):
            # Assign each sample to its nearest centroid.
            distances = self._calc_distances(X)
            self.labels = np.argmin(distances, axis=1)

            # Recompute centroids. A cluster that lost all its members keeps
            # its previous centroid — the original `.mean()` of an empty slice
            # produced NaN and poisoned every subsequent iteration.
            new_centroids = np.array([
                X[self.labels == k].mean(axis=0) if np.any(self.labels == k)
                else self.centroids[k]
                for k in range(self.n_clusters)
            ])

            # Total distance the centroids moved in this iteration.
            centroid_shift = np.sum(np.linalg.norm(new_centroids - self.centroids, axis=1))
            self.centroids = new_centroids

            # Inertia: sum of squared sample-to-centroid distances,
            # skipping empty clusters.
            self.inertia = 0
            for k in range(self.n_clusters):
                cluster_points = X[self.labels == k]
                if len(cluster_points) > 0:
                    self.inertia += np.sum(np.linalg.norm(cluster_points - self.centroids[k], axis=1)**2)

            # Stop once the centroids have effectively stopped moving.
            if centroid_shift < self.tol:
                print(f"Converged after {iteration+1} iterations")
                break

        return self

    def _calc_distances(self, X):
        """Return the (n_samples, n_clusters) Euclidean distance matrix."""
        distances = np.zeros((X.shape[0], self.n_clusters))
        for k in range(self.n_clusters):
            distances[:, k] = np.linalg.norm(X - self.centroids[k], axis=1)
        return distances

    def predict(self, X):
        """Return the nearest-centroid cluster label for each row of X."""
        distances = self._calc_distances(X)
        return np.argmin(distances, axis=1)

def purity_score(y_true, y_pred):
    """Purity: fraction of samples covered by each cluster's majority class.

    Builds a (cluster x class) contingency matrix, takes the dominant true
    class count in every cluster row, and normalizes by the sample total.
    """
    y_true = y_true.astype(np.int64)
    assert y_pred.size == y_true.size
    size = max(y_pred.max(), y_true.max()) + 1
    contingency = np.zeros((size, size), dtype=np.int64)
    for cluster, label in zip(y_pred, y_true):
        contingency[cluster, label] += 1
    # Sum of per-cluster dominant-class counts over all samples.
    return contingency.max(axis=1).sum() / float(contingency.sum())

def f_measure(y_true, y_pred):
    """Average per-cluster F1 score over non-empty clusters.

    For each cluster: precision = dominant-class count / cluster size;
    recall = dominant-class count / total count of that dominant class
    (the matching contingency column). NOTE: the result is a plain
    (unweighted) mean over clusters, not a support-weighted average.
    """
    y_true = y_true.astype(np.int64)
    assert y_pred.size == y_true.size
    size = max(y_pred.max(), y_true.max()) + 1
    contingency = np.zeros((size, size), dtype=np.int64)
    for cluster, label in zip(y_pred, y_true):
        contingency[cluster, label] += 1

    f1_scores = []
    for row in contingency:
        total = row.sum()
        if total == 0:
            continue  # cluster has no members; skip it
        hit = row.max()
        dominant = row.argmax()  # first maximum on ties (np.argmax semantics)
        precision = hit / total
        recall = hit / contingency[:, dominant].sum()
        denom = precision + recall
        f1_scores.append(0 if denom == 0 else 2 * precision * recall / denom)

    return np.mean(f1_scores)

def accuracy(y_true, y_pred):
    """Clustering accuracy under the optimal cluster-to-class assignment.

    Builds the (cluster x class) contingency matrix, then solves the
    maximum-weight matching via the Hungarian algorithm (scipy's
    linear_sum_assignment on the negated matrix) before scoring.
    """
    from scipy.optimize import linear_sum_assignment

    y_true = y_true.astype(np.int64)
    assert y_pred.size == y_true.size
    size = max(y_pred.max(), y_true.max()) + 1
    contingency = np.zeros((size, size), dtype=np.int64)
    for cluster, label in zip(y_pred, y_true):
        contingency[cluster, label] += 1

    # Negate so the minimization solver yields the maximum-count matching.
    rows, cols = linear_sum_assignment(-contingency)
    return contingency[rows, cols].sum() / y_pred.size

# Load the Iris dataset: 150 samples, 4 numeric features, 3 species labels.
iris = datasets.load_iris()
X = iris.data
y = iris.target
feature_names = iris.feature_names

# Standardize features to zero mean / unit variance so that no single
# feature dominates the Euclidean distances used by KMeans.
scaler = StandardScaler()
X_scaled = scaler.fit_transform(X)

# Hold out 30% of the data for evaluation.
# NOTE(review): a train/test split is unusual for clustering evaluation —
# fitting on the full dataset is more common; confirm this is intended.
X_train, X_test, y_train, y_test = train_test_split(X_scaled, y, test_size=0.3, random_state=42)

# Fit the custom KMeans on the training portion.
kmeans = KMeans(n_clusters=3, max_iter=100, tol=1e-4)
kmeans.fit(X_train)

# Assign each test sample to its nearest learned centroid.
y_pred = kmeans.predict(X_test)

# External clustering metrics; cluster ids need not match class ids —
# each metric handles the label permutation in its own way.
f1 = f_measure(y_test, y_pred)
acc = accuracy(y_test, y_pred)
nmi = normalized_mutual_info_score(y_test, y_pred)
ari = adjusted_rand_score(y_test, y_pred)

# Report the metrics.
print(f"F-measure: {f1:.4f}")
print(f"Accuracy: {acc:.4f}")
print(f"NMI: {nmi:.4f}")
print(f"ARI: {ari:.4f}")

# Visualize results using only the first two (standardized) features.
plt.figure(figsize=(14, 5))

# Left panel: test points colored by their true class.
plt.subplot(1, 2, 1)
plt.scatter(X_test[:, 0], X_test[:, 1], c=y_test, cmap=cm.viridis, edgecolor='k', s=50)
plt.title('原始数据分布')
plt.xlabel(feature_names[0])
plt.ylabel(feature_names[1])

# Right panel: test points colored by predicted cluster, centroids as stars.
plt.subplot(1, 2, 2)
plt.scatter(X_test[:, 0], X_test[:, 1], c=y_pred, cmap=cm.viridis, edgecolor='k', s=50)
plt.scatter(kmeans.centroids[:, 0], kmeans.centroids[:, 1], marker='*', s=300, c='red', label='Centroids')
plt.title('KMeans聚类结果')
plt.xlabel(feature_names[0])
plt.ylabel(feature_names[1])
plt.legend()

plt.tight_layout()
plt.show()

# Elbow method: re-fit for k = 1..10 on the full standardized data and plot
# each run's final inertia (rebinds `kmeans`, which is safe because the
# earlier plots have already been drawn).
plt.figure(figsize=(10, 6))
k_range = range(1, 11)
inertia_values = []

for k in k_range:
    kmeans = KMeans(n_clusters=k, max_iter=100, tol=1e-4)
    kmeans.fit(X_scaled)
    inertia_values.append(kmeans.inertia)

plt.plot(k_range, inertia_values, 'bo-')
plt.xlabel('Number of clusters (k)')
plt.ylabel('Inertia')
plt.title('The Elbow Method showing the optimal k')
plt.show()

# Collect the metrics into a DataFrame for a tabular printout.
results = pd.DataFrame({
    'Metric': ['F-measure', 'Accuracy', 'NMI', 'ARI'],
    'Value': [f1, acc, nmi, ari]
})

print("\n聚类评估指标:")
print(results)