import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from scipy.optimize import linear_sum_assignment
from sklearn.metrics import (
    accuracy_score,
    normalized_mutual_info_score,
    adjusted_rand_score,
    f1_score,
)
from sklearn.preprocessing import LabelEncoder

class KMeans:
    """K-means clustering via Lloyd's algorithm with random initialization.

    Attributes set by :meth:`fit`:
        centroids: (n_clusters, n_features) array of cluster centers.
        labels_:   cluster index assigned to each training sample.
        inertia_:  sum of squared distances of samples to their centroid.
    """

    def __init__(self, n_clusters=3, max_iter=300, tol=1e-4, random_state=None):
        """
        Args:
            n_clusters: number of clusters to form.
            max_iter: maximum number of assignment/update iterations.
            tol: absolute tolerance on centroid movement for convergence.
            random_state: seed for reproducible centroid initialization.
        """
        self.n_clusters = n_clusters
        self.max_iter = max_iter
        self.tol = tol
        self.random_state = random_state
        self.centroids = None
        self.labels_ = None
        self.inertia_ = None

    def fit(self, X):
        """Cluster X; sets centroids, labels_ and inertia_. Returns self.

        Raises:
            ValueError: if X has fewer samples than n_clusters.
        """
        X = np.asarray(X, dtype=float)
        n_samples = X.shape[0]
        if n_samples < self.n_clusters:
            raise ValueError(
                f"n_samples={n_samples} must be >= n_clusters={self.n_clusters}"
            )

        # Local generator: avoids mutating NumPy's global RNG state.
        rng = np.random.default_rng(self.random_state)
        init_indices = rng.choice(n_samples, self.n_clusters, replace=False)
        self.centroids = X[init_indices].copy()

        for _ in range(self.max_iter):
            # Assign each sample to its nearest centroid (squared Euclidean).
            distances = self._calc_distances(X)
            labels = np.argmin(distances, axis=1)

            # Recompute centroids; an empty cluster keeps its old centroid
            # (the original mean-of-empty produced NaN and corrupted the run).
            new_centroids = self.centroids.copy()
            for k in range(self.n_clusters):
                members = X[labels == k]
                if len(members):
                    new_centroids[k] = members.mean(axis=0)

            if np.allclose(self.centroids, new_centroids, atol=self.tol):
                break

            self.centroids = new_centroids

        # Recompute assignments against the FINAL centroids so labels_ and
        # inertia_ are consistent even when max_iter is exhausted without
        # convergence (the original left them one iteration stale).
        distances = self._calc_distances(X)
        self.labels_ = np.argmin(distances, axis=1)
        self.inertia_ = float(np.sum(np.min(distances, axis=1)))
        return self

    def _calc_distances(self, X):
        """Return (n_samples, n_clusters) squared Euclidean distances."""
        return np.array([np.sum((X - centroid) ** 2, axis=1)
                         for centroid in self.centroids]).T

    def predict(self, X):
        """Return the index of the nearest centroid for each row of X."""
        distances = self._calc_distances(np.asarray(X, dtype=float))
        return np.argmin(distances, axis=1)

def load_iris_data(filepath):
    """Load an iris-style CSV: all columns but the last are features.

    Args:
        filepath: path to a CSV whose last column holds class labels.

    Returns:
        (X, y): X is the feature matrix; y holds integer-encoded labels,
        with codes assigned in sorted-class order — the exact same mapping
        sklearn's LabelEncoder produces, but computed with np.unique so
        this loader has no sklearn dependency.
    """
    data = pd.read_csv(filepath)
    X = data.iloc[:, :-1].to_numpy()
    # return_inverse gives each label its index in the sorted unique classes.
    _, y = np.unique(data.iloc[:, -1].to_numpy(), return_inverse=True)
    return X, y

def _match_cluster_labels(y_true, y_pred):
    """Relabel arbitrary cluster ids to the true classes they best overlap.

    Uses the Hungarian algorithm on the cluster/class contingency table;
    any surplus cluster (more clusters than classes) maps to its majority
    class.
    """
    clusters = np.unique(y_pred)
    classes = np.unique(y_true)
    # contingency[i, j] = number of samples in cluster i with true class j
    contingency = np.zeros((len(clusters), len(classes)), dtype=int)
    for i, c in enumerate(clusters):
        for j, t in enumerate(classes):
            contingency[i, j] = np.sum((y_pred == c) & (y_true == t))
    # Maximize total overlap: minimize the negated contingency.
    rows, cols = linear_sum_assignment(-contingency)
    mapping = {clusters[r]: classes[c] for r, c in zip(rows, cols)}
    for i, c in enumerate(clusters):
        if c not in mapping:  # unmatched surplus cluster -> majority class
            mapping[c] = classes[np.argmax(contingency[i])]
    return np.array([mapping[c] for c in y_pred])


def evaluate_clustering(y_true, y_pred):
    """Score a clustering against ground-truth class labels.

    Cluster ids are an arbitrary permutation of the true classes, so
    Accuracy and F-measure are computed only after optimally matching
    clusters to classes (otherwise a perfect clustering with permuted ids
    would score near zero). NMI and ARI are permutation-invariant and use
    the raw predictions.

    Returns:
        dict with keys 'Accuracy', 'NMI', 'ARI', 'F_measure'.
    """
    y_true = np.asarray(y_true)
    y_pred = np.asarray(y_pred)
    y_matched = _match_cluster_labels(y_true, y_pred)

    acc = accuracy_score(y_true, y_matched)
    nmi = normalized_mutual_info_score(y_true, y_pred)
    ari = adjusted_rand_score(y_true, y_pred)
    f_measure = f1_score(y_true, y_matched, average='weighted')

    return {
        'Accuracy': acc,
        'NMI': nmi,
        'ARI': ari,
        'F_measure': f_measure
    }

def visualize_results(X, y_true, y_pred, feature_names=None):
    """Show side-by-side scatter plots of true vs. predicted cluster labels.

    Only the first two feature columns of X are plotted. If feature_names
    is given, its first two entries label the axes of both panels.
    """
    plt.figure(figsize=(15, 6))

    panels = (('True Clusters', y_true), ('Predicted Clusters', y_pred))
    for position, (title, labels) in enumerate(panels, start=1):
        ax = plt.subplot(1, 2, position)
        ax.scatter(X[:, 0], X[:, 1], c=labels, cmap='viridis')
        ax.set_title(title)
        if feature_names:
            ax.set_xlabel(feature_names[0])
            ax.set_ylabel(feature_names[1])

    plt.tight_layout()
    plt.show()

if __name__ == "__main__":
    # 加载数据
    X, y = load_iris_data('./exp6/src/iris.csv')
    
    # 运行KMeans
    kmeans = KMeans(n_clusters=3, max_iter=300, random_state=42)
    kmeans.fit(X)
    y_pred = kmeans.labels_
    
    # 评估结果
    metrics = evaluate_clustering(y, y_pred)
    print("聚类评估指标:")
    for name, value in metrics.items():
        print(f"{name}: {value:.4f}")
    
    # 可视化
    visualize_results(X, y, y_pred, feature_names=['Sepal Length', 'Sepal Width'])