import numpy as np
import matplotlib.pyplot as plt
from sklearn.metrics import normalized_mutual_info_score, adjusted_rand_score, f1_score, accuracy_score
from sklearn.preprocessing import LabelEncoder, StandardScaler
from sklearn.decomposition import PCA
import os
import logging
from sklearn.metrics.cluster import contingency_matrix  # 添加这一行

# Logging configuration
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')
logger = logging.getLogger(__name__)

# Configure matplotlib font fallbacks so Chinese (CJK) labels render correctly
plt.rcParams["font.family"] = ["SimHei", "WenQuanYi Micro Hei", "Heiti TC", "sans-serif"]
plt.rcParams["axes.unicode_minus"] = False  # render minus signs correctly with CJK fonts

class KMeans:
    """From-scratch K-Means clustering with multiple random restarts.

    Attributes set by fit():
        centroids: (n_clusters, n_features) array of final cluster centers.
        labels_: cluster index assigned to each training sample.
        inertia_: within-cluster sum of squared distances (lower is better).
        n_iter_: number of iterations run by the best restart.
    """

    def __init__(self, n_clusters=3, max_iter=100, tol=1e-4, n_init=10, random_state=None):
        """
        Parameters:
            n_clusters: number of clusters.
            max_iter: maximum iterations per random restart.
            tol: convergence threshold on the total centroid shift.
            n_init: number of random restarts; the run with the lowest
                inertia is kept.
            random_state: seed for reproducible initialization.
        """
        self.n_clusters = n_clusters
        self.max_iter = max_iter
        self.tol = tol
        self.n_init = n_init
        self.random_state = random_state
        self.centroids = None
        self.labels_ = None
        self.inertia_ = float('inf')  # within-cluster sum of squares
        self.n_iter_ = 0

    def fit(self, X):
        """Run K-Means on X, keeping the best of n_init restarts."""
        X = np.asarray(X)
        # BUGFIX: use a private RNG instead of np.random.seed so fitting does
        # not clobber NumPy's global random state as a side effect.
        rng = np.random.RandomState(self.random_state)

        best_inertia = float('inf')
        best_centroids = None
        best_labels = None
        best_n_iter = 0

        for _ in range(self.n_init):
            centroids, labels, n_iter = self._single_run(X, rng)

            distances = self._calculate_distances(X, centroids)
            inertia = np.sum(np.min(distances, axis=1) ** 2)

            if inertia < best_inertia:
                best_inertia = inertia
                best_centroids = centroids
                best_labels = labels
                best_n_iter = n_iter

        self.centroids = best_centroids
        self.labels_ = best_labels
        self.inertia_ = best_inertia
        self.n_iter_ = best_n_iter

        logging.getLogger(__name__).info(
            "KMeans finished: iterations=%d, inertia=%.4f", self.n_iter_, self.inertia_)
        return self

    def _single_run(self, X, rng):
        """One K-Means run from a random initialization.

        Returns (centroids, labels, n_iter); labels are guaranteed to be
        consistent with the returned centroids.
        """
        n_samples = X.shape[0]
        indices = rng.choice(n_samples, self.n_clusters, replace=False)
        centroids = X[indices].copy()

        n_iter = 0
        for n_iter in range(1, self.max_iter + 1):
            distances = self._calculate_distances(X, centroids)
            labels = np.argmin(distances, axis=1)

            # New centroid = mean of assigned samples; an empty cluster keeps
            # its previous centroid.
            new_centroids = np.array([
                X[labels == j].mean(axis=0) if np.any(labels == j) else centroids[j]
                for j in range(self.n_clusters)
            ])

            shift = np.linalg.norm(new_centroids - centroids)
            # BUGFIX: adopt the updated centroids *before* testing for
            # convergence; the original broke first, so the reported centroids
            # lagged one update behind the final means.
            centroids = new_centroids
            if shift < self.tol:
                break

        # BUGFIX: re-assign once more so labels match the final centroids.
        # (The original could return labels computed against stale centroids
        # when the loop exited via max_iter, and crashed when max_iter == 0.)
        labels = np.argmin(self._calculate_distances(X, centroids), axis=1)
        return centroids, labels, n_iter

    def _calculate_distances(self, X, centroids):
        """Return the (n_samples, n_clusters) Euclidean distance matrix."""
        # Fully vectorized via broadcasting: ||x - c|| for every pair.
        diff = X[:, np.newaxis, :] - centroids[np.newaxis, :, :]
        return np.linalg.norm(diff, axis=2)

    def predict(self, X):
        """Assign each sample in X to its nearest fitted centroid."""
        distances = self._calculate_distances(np.asarray(X), self.centroids)
        return np.argmin(distances, axis=1)

class DataProcessor:
    """Static helpers for loading and preprocessing a CSV-style dataset."""

    @staticmethod
    def load_data(file_path):
        """Load a comma-separated dataset whose last column is the label.

        Parameters:
            file_path: path to the data file.

        Returns:
            (X, y) where X is a float feature matrix and y is an integer
            label vector, or (None, None) if the file does not exist.
        """
        if not os.path.exists(file_path):
            logger.error(f"文件不存在: {file_path}")
            return None, None

        try:
            import pandas as pd
            df = pd.read_csv(file_path, header=None)
            # Convention: every column but the last is a feature.
            X = df.iloc[:, :-1].values
            y_raw = df.iloc[:, -1].values
        except ImportError:
            # pandas unavailable; fall back to NumPy text parsing.
            logger.warning("pandas未安装，使用numpy加载数据")
            data = np.loadtxt(file_path, delimiter=',', dtype=str)
            X = data[:, :-1].astype(float)
            y_raw = data[:, -1]

        # Encode (possibly string) labels as integers 0..n_classes-1.
        # The original fallback path hand-rolled this even though sklearn's
        # LabelEncoder is available regardless of pandas, and it also skipped
        # the success log; both paths are unified here.
        y = LabelEncoder().fit_transform(y_raw)

        logger.info(f"成功加载数据，样本数: {X.shape[0]}, 特征数: {X.shape[1]}, 类别数: {len(np.unique(y))}")
        return X, y

    @staticmethod
    def preprocess(X, standardize=True, pca_components=None):
        """Standardize and optionally PCA-reduce the feature matrix.

        Parameters:
            X: feature matrix.
            standardize: if True, scale features to zero mean / unit variance.
            pca_components: target dimensionality for PCA, or None to skip.
                PCA is only applied when it actually reduces the dimension.

        Returns:
            The transformed feature matrix.
        """
        if standardize:
            X = StandardScaler().fit_transform(X)
            logger.info("数据标准化完成")

        if pca_components is not None and pca_components < X.shape[1]:
            X = PCA(n_components=pca_components).fit_transform(X)
            logger.info(f"PCA降维完成，降维后维度: {X.shape[1]}")

        return X

class ClusterEvaluator:
    """Evaluation metrics and 2-D visualization for clustering results."""

    @staticmethod
    def _map_labels(y_true, y_pred):
        """Optimally map cluster ids onto true class ids (Hungarian method).

        Unlike the naive square-matrix construction, this handles a different
        number of clusters vs. true classes (rectangular assignment) and
        non-contiguous label values.
        """
        from scipy.optimize import linear_sum_assignment
        pred_labels = np.unique(y_pred)
        true_labels = np.unique(y_true)
        # cost[i, j] = -(overlap of cluster i with class j); minimizing cost
        # maximizes the total matched overlap.
        cost = np.zeros((len(pred_labels), len(true_labels)))
        for i, p in enumerate(pred_labels):
            for j, t in enumerate(true_labels):
                cost[i, j] = -np.sum((y_pred == p) & (y_true == t))
        row_ind, col_ind = linear_sum_assignment(cost)
        mapping = {pred_labels[r]: true_labels[c] for r, c in zip(row_ind, col_ind)}
        # Clusters left unmatched (more clusters than classes) keep their id.
        return np.array([mapping.get(p, p) for p in y_pred])

    @staticmethod
    def calculate_metrics(y_true, y_pred):
        """Compute clustering quality metrics.

        Returns a dict with F_measure, ACC (both computed on optimally
        remapped labels), NMI, RI, ARI and Purity.
        """
        y_pred_mapped = ClusterEvaluator._map_labels(y_true, y_pred)

        # BUGFIX: F-measure must use the optimally mapped labels, exactly like
        # ACC; raw cluster ids are an arbitrary permutation of class ids, so
        # f1_score on them was meaningless.
        f_measure = f1_score(y_true, y_pred_mapped, average='macro')
        acc = accuracy_score(y_true, y_pred_mapped)

        # Permutation-invariant metrics can use the raw cluster labels.
        nmi = normalized_mutual_info_score(y_true, y_pred)

        from sklearn.metrics import rand_score
        ri = rand_score(y_true, y_pred)
        ari = adjusted_rand_score(y_true, y_pred)

        # Purity: fraction of samples in the majority true class per cluster.
        contingency = contingency_matrix(y_true, y_pred)
        purity = np.sum(np.max(contingency, axis=0)) / np.sum(contingency)

        return {
            'F_measure': f_measure,
            'ACC': acc,
            'NMI': nmi,
            'RI': ri,
            'ARI': ari,
            'Purity': purity
        }

    @staticmethod
    def visualize_clusters(X, y_true, y_pred, centroids=None, feature_names=None, title=None):
        """
        Plot clustering results on the first two feature dimensions.

        Parameters:
            X: feature data (needs >= 2 columns to plot).
            y_true: ground-truth labels.
            y_pred: predicted cluster labels.
            centroids: cluster centers (optional, drawn as red crosses).
            feature_names: axis labels (optional).
            title: overall figure title (optional).
        """
        if X.shape[1] < 2:
            logger.warning("特征维度小于2，无法可视化")
            return

        plt.figure(figsize=(15, 5))

        # Three side-by-side panels: truth / prediction / prediction+centers.
        panels = [
            ('真实标签', y_true, False),
            ('KMeans聚类结果', y_pred, False),
            ('KMeans聚类结果(带中心)', y_pred, True),
        ]
        for idx, (panel_title, colors, with_centers) in enumerate(panels, start=1):
            plt.subplot(1, 3, idx)
            scatter = plt.scatter(X[:, 0], X[:, 1], c=colors, cmap='viridis',
                                  edgecolor='k', alpha=0.8)
            if with_centers and centroids is not None:
                plt.scatter(centroids[:, 0], centroids[:, 1], c='red',
                            marker='X', s=200, label='聚类中心')
            plt.title(panel_title)
            if feature_names:
                plt.xlabel(feature_names[0])
                plt.ylabel(feature_names[1])
            plt.colorbar(scatter, ticks=np.unique(colors))
        plt.legend()

        if title:
            plt.suptitle(title, fontsize=16)

        plt.tight_layout(rect=[0, 0, 1, 0.95])  # leave room for the suptitle
        plt.show()

def main(file_path=r"D:\irisdata.txt"):
    """Run the full pipeline: load, preprocess, cluster, evaluate, visualize.

    Parameters:
        file_path: path to the dataset; the last column must be the label.
            Defaults to the original hard-coded location for backward
            compatibility.
    """
    # Load the dataset; bail out if the file is missing.
    X, y = DataProcessor.load_data(file_path)
    if X is None:
        return

    # Standardize features (no PCA for the first run).
    X_processed = DataProcessor.preprocess(X, standardize=True)

    # KMeans hyperparameters; cluster count taken from the ground truth.
    n_clusters = len(np.unique(y))
    max_iter = 100
    tol = 1e-4
    n_init = 10  # multiple restarts to avoid poor local optima

    logger.info(f"开始KMeans聚类，聚类数: {n_clusters}")
    kmeans = KMeans(n_clusters=n_clusters, max_iter=max_iter, tol=tol,
                    n_init=n_init, random_state=42)
    kmeans.fit(X_processed)
    y_pred = kmeans.predict(X_processed)

    # Report evaluation metrics.
    metrics = ClusterEvaluator.calculate_metrics(y, y_pred)
    logger.info("聚类评估指标:")
    for metric, value in metrics.items():
        logger.info(f"{metric}: {value:.4f}")

    # Visualize on the first two (standardized) features.
    feature_names = ['萼片长度', '萼片宽度', '花瓣长度', '花瓣宽度']
    ClusterEvaluator.visualize_clusters(
        X_processed, y, y_pred,
        centroids=kmeans.centroids,
        feature_names=feature_names[:2],
        title="KMeans聚类结果"
    )

    # With more than two features, also cluster and plot in PCA space.
    if X.shape[1] > 2:
        logger.info("使用PCA降维后可视化")
        X_pca = DataProcessor.preprocess(X, standardize=True, pca_components=2)
        kmeans_pca = KMeans(n_clusters=n_clusters, random_state=42)
        kmeans_pca.fit(X_pca)
        y_pred_pca = kmeans_pca.predict(X_pca)

        ClusterEvaluator.visualize_clusters(
            X_pca, y, y_pred_pca,
            centroids=kmeans_pca.centroids,
            feature_names=['PCA成分1', 'PCA成分2'],
            title="PCA降维后的KMeans聚类结果"
        )

if __name__ == "__main__":
    main()
