import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.preprocessing import scale, StandardScaler
from sklearn.cluster import KMeans, AgglomerativeClustering, DBSCAN
from sklearn.metrics import silhouette_score, calinski_harabasz_score, davies_bouldin_score
from sklearn.ensemble import RandomForestClassifier
from sklearn.decomposition import PCA
import seaborn as sns
from scipy.spatial.distance import cdist
from scipy.cluster.hierarchy import dendrogram, linkage
from scipy.stats import mode
import matplotlib as mpl
import platform
import os
from datetime import datetime

# Create a timestamped directory that collects every analysis artifact.
timestamp = datetime.now().strftime('%Y%m%d_%H%M%S')
RESULTS_DIR = f'results_{timestamp}'
os.makedirs(RESULTS_DIR, exist_ok=True)

# Pick a CJK-capable font for the current operating system so that the
# Chinese plot titles/labels render correctly.
_CJK_FONTS = {
    'Darwin': ['Songti SC'],   # macOS bundled Songti
    'Windows': ['SimSun'],     # Windows bundled SimSun
}
system = platform.system()
plt.rcParams['font.family'] = _CJK_FONTS.get(system, ['Noto Sans CJK SC'])

# Non-ASCII fonts often lack the Unicode minus glyph; fall back to '-'.
plt.rcParams['axes.unicode_minus'] = False

# Propagate the chosen font to seaborn's theme.
sns.set(font=plt.rcParams['font.family'][0])

class ProteinAnalyzer:
    """Clustering-analysis pipeline for a country protein-consumption table.

    Reads a tab-separated file containing a 'Country' column plus numeric
    feature columns, standardizes the features, projects them with PCA, and
    offers clustering, evaluation, and plotting utilities.  Every output
    file is written under the module-level RESULTS_DIR.
    """

    def __init__(self, filename):
        """Load *filename* and precompute scaled data and a PCA projection.

        On a read failure every derived attribute is set to None so later
        accidental use fails predictably instead of raising AttributeError.
        """
        self.data = self._read_data(filename)
        if self.data is not None:
            self.countries = self.data['Country']
            self.numeric_data = self.data.drop('Country', axis=1)
            # Standardize to zero mean / unit variance so distance-based
            # clustering is not dominated by large-valued columns.
            self.scaled_data = scale(self.numeric_data)
            self.pca = None
            self.pca_data = None
            self._perform_pca()
        else:
            self.countries = None
            self.numeric_data = None
            self.scaled_data = None
            self.pca = None
            self.pca_data = None

    def _read_data(self, filename):
        """Read the tab-separated data file; return a DataFrame or None."""
        try:
            data = pd.read_csv(filename, sep='\t')
            print("数据读取成功！")
            return data
        except FileNotFoundError:
            # Bug fix: the original printed a literal "(unknown)"
            # placeholder instead of the missing file's name.
            print(f"错误: 找不到文件 {filename}")
            return None

    def _perform_pca(self):
        """Fit a full PCA on the scaled data and save a scree plot."""
        self.pca = PCA()
        self.pca_data = self.pca.fit_transform(self.scaled_data)

        # Cumulative share of variance explained by the first k components.
        explained_variance_ratio = self.pca.explained_variance_ratio_
        cumulative_variance_ratio = np.cumsum(explained_variance_ratio)

        plt.figure(figsize=(10, 6))
        plt.plot(range(1, len(explained_variance_ratio) + 1),
                cumulative_variance_ratio, 'bo-')
        plt.xlabel('主成分数量')
        plt.ylabel('累积解释方差比')
        plt.title('PCA解释方差比')
        plt.grid(True)
        plt.savefig(f'{RESULTS_DIR}/pca_variance_ratio.png')
        plt.close()

    def plot_data_distribution(self):
        """Save box plots of raw vs. scaled features plus a correlation heatmap."""
        fig, (ax1, ax2) = plt.subplots(2, 1, figsize=(12, 10))

        # Raw feature distributions.
        sns.boxplot(data=self.numeric_data, ax=ax1)
        ax1.set_title('原始数据分布')
        ax1.set_xticklabels(ax1.get_xticklabels(), rotation=45)

        # Distributions after standardization.
        sns.boxplot(data=pd.DataFrame(self.scaled_data, columns=self.numeric_data.columns), ax=ax2)
        ax2.set_title('标准化后的数据分布')
        ax2.set_xticklabels(ax2.get_xticklabels(), rotation=45)

        plt.tight_layout()
        plt.savefig(f'{RESULTS_DIR}/data_distribution.png')
        plt.close()

        # Pairwise feature correlations.
        plt.figure(figsize=(10, 8))
        sns.heatmap(self.numeric_data.corr(), annot=True, cmap='coolwarm', center=0)
        plt.title('特征相关性热力图')
        plt.tight_layout()
        plt.savefig(f'{RESULTS_DIR}/correlation_heatmap.png')
        plt.close()

    def analyze_optimal_clusters(self):
        """Score KMeans for k = 2..10 with three internal validity indices.

        Returns:
            dict with keys 'silhouette', 'calinski_harabasz',
            'davies_bouldin' (lists of scores, one per k) and
            'n_clusters' (the corresponding k values).
        """
        n_clusters_range = range(2, 11)
        silhouette_scores = []
        calinski_scores = []
        davies_scores = []

        for n_clusters in n_clusters_range:
            kmeans = KMeans(n_clusters=n_clusters, random_state=42)
            labels = kmeans.fit_predict(self.scaled_data)

            silhouette_scores.append(silhouette_score(self.scaled_data, labels))
            calinski_scores.append(calinski_harabasz_score(self.scaled_data, labels))
            davies_scores.append(davies_bouldin_score(self.scaled_data, labels))

        return {
            'silhouette': silhouette_scores,
            'calinski_harabasz': calinski_scores,
            'davies_bouldin': davies_scores,
            'n_clusters': list(n_clusters_range)
        }

    def perform_ensemble_clustering(self, n_clusters=5):
        """Combine KMeans and hierarchical clustering via co-association.

        Builds a similarity matrix counting how often each pair of samples
        lands in the same cluster across the base clusterings, then runs
        average-linkage agglomerative clustering on the derived distance
        matrix.

        Args:
            n_clusters: number of final clusters.

        Returns:
            1-D array of ensemble cluster labels.
        """
        kmeans = KMeans(n_clusters=n_clusters, random_state=42)
        hierarchical = AgglomerativeClustering(n_clusters=n_clusters)

        base_labelings = [
            kmeans.fit_predict(self.scaled_data),
            hierarchical.fit_predict(self.scaled_data),
        ]

        # Co-association (voting) matrix.  Uses numpy broadcasting instead
        # of the original O(n^2) pure-Python double loop.
        n_samples = len(self.scaled_data)
        voting_matrix = np.zeros((n_samples, n_samples))
        for labels in base_labelings:
            voting_matrix += (labels[:, None] == labels[None, :]).astype(float)

        # Fraction of base clusterings that agree on each sample pair.
        similarity_matrix = voting_matrix / len(base_labelings)

        # Cluster the precomputed pairwise distances (1 - similarity).
        final_clustering = AgglomerativeClustering(
            n_clusters=n_clusters,
            metric='precomputed',
            linkage='average'
        )
        distance_matrix = 1 - similarity_matrix
        return final_clustering.fit_predict(distance_matrix)

    def analyze_feature_importance(self, labels):
        """Rank features by their power to separate the given clusters.

        Fits a random forest that predicts the cluster labels, saves a bar
        chart, and returns a DataFrame sorted by importance (descending).
        """
        rf = RandomForestClassifier(n_estimators=100, random_state=42)
        rf.fit(self.scaled_data, labels)

        feature_imp = pd.DataFrame({
            'feature': self.numeric_data.columns,
            'importance': rf.feature_importances_
        }).sort_values('importance', ascending=False)

        plt.figure(figsize=(10, 6))
        sns.barplot(x='importance', y='feature', data=feature_imp)
        plt.title('特征重要性分析')
        plt.tight_layout()
        plt.savefig(f'{RESULTS_DIR}/feature_importance.png')
        plt.close()

        return feature_imp

    def plot_clustering_results(self, labels, method_name):
        """Save the PCA scatter plot, assignment CSV, per-cluster box plots,
        and pair plot for one clustering result under RESULTS_DIR/<method_name>/."""
        method_dir = os.path.join(RESULTS_DIR, method_name)
        os.makedirs(method_dir, exist_ok=True)

        # Guarantee a flat label vector for the boolean indexing below.
        labels = np.array(labels).ravel()

        # Scatter plot in the space of the first two principal components.
        plt.figure(figsize=(10, 8))
        scatter = plt.scatter(self.pca_data[:, 0], self.pca_data[:, 1],
                             c=labels, cmap='viridis')
        plt.colorbar(scatter)
        plt.title(f'第二主成分分析聚类结果-{method_name}')
        plt.xlabel('第一主成分')
        plt.ylabel('第二主成分')
        plt.savefig(f'{method_dir}/scatter_plot.png')
        plt.close()

        # Country -> cluster assignment table.
        result_df = pd.DataFrame({
            'Country': self.countries,
            'Cluster': labels
        })
        result_df.to_csv(f'{method_dir}/clustering_results.csv', index=False)

        # Per-cluster feature distributions, offset so clusters sit side by
        # side for each feature.  Iterating np.unique(labels) (instead of
        # range(k)) also handles non-contiguous label values.
        plt.figure(figsize=(15, 8))
        for offset, cluster_id in enumerate(np.unique(labels)):
            cluster_data = self.numeric_data[labels == cluster_id]
            plt.boxplot(cluster_data.values,
                       positions=np.array(range(len(self.numeric_data.columns))) * 2.0 + offset * 0.5,
                       tick_labels=self.numeric_data.columns,
                       widths=0.4)
        plt.title(f'{method_name}聚类结果的特征分布')
        plt.xticks(rotation=45)
        plt.tight_layout()
        plt.savefig(f'{method_dir}/cluster_distribution.png')
        plt.close()

        # Pairwise feature relationships colored by cluster.
        df_with_clusters = self.numeric_data.copy()
        df_with_clusters['Cluster'] = labels
        g = sns.pairplot(df_with_clusters, hue='Cluster', diag_kind='hist')
        g.fig.suptitle(f'{method_name}聚类结果的特征关系', y=1.02)
        plt.savefig(f'{method_dir}/pairplot.png')
        plt.close()

    def plot_pca_variance_ratio(self):
        """Save the cumulative explained-variance curve of the fitted PCA."""
        plt.figure(figsize=(10, 6))
        variance_ratio = self.pca.explained_variance_ratio_
        cumulative_variance_ratio = np.cumsum(variance_ratio)

        plt.plot(range(1, len(variance_ratio) + 1), cumulative_variance_ratio, 'bo-')
        plt.xlabel('主成分数量')
        plt.ylabel('累积解释方差比')
        plt.title('PCA累积解释方差比')
        plt.grid(True)
        plt.savefig(f'{RESULTS_DIR}/pca_variance_ratio.png')
        plt.close()

    def plot_correlation_heatmap(self):
        """Save an annotated heatmap of the feature correlation matrix."""
        plt.figure(figsize=(12, 10))
        correlation_matrix = self.numeric_data.corr()

        sns.heatmap(correlation_matrix,
                    annot=True,       # print the coefficients in each cell
                    cmap='coolwarm',  # diverging red/blue palette
                    center=0,         # zero correlation maps to white
                    fmt='.2f')        # two decimal places

        plt.title('特征相关性热力图')
        plt.tight_layout()
        plt.savefig(f'{RESULTS_DIR}/correlation_heatmap.png')
        plt.close()

    def plot_clustering_metrics(self):
        """Save a three-panel figure of validity indices versus cluster count."""
        scores = self.analyze_optimal_clusters()

        plt.figure(figsize=(15, 5))

        # Silhouette coefficient (higher is better).
        plt.subplot(131)
        plt.plot(scores['n_clusters'], scores['silhouette'], 'bo-')
        plt.xlabel('聚类数量')
        plt.ylabel('轮廓系数')
        plt.title('轮廓系数评估')
        plt.grid(True)

        # Calinski-Harabasz index (higher is better).
        plt.subplot(132)
        plt.plot(scores['n_clusters'], scores['calinski_harabasz'], 'ro-')
        plt.xlabel('聚类数量')
        plt.ylabel('Calinski-Harabasz指数')
        plt.title('Calinski-Harabasz评估')
        plt.grid(True)

        # Davies-Bouldin index (lower is better).
        plt.subplot(133)
        plt.plot(scores['n_clusters'], scores['davies_bouldin'], 'go-')
        plt.xlabel('聚类数量')
        plt.ylabel('Davies-Bouldin指数')
        plt.title('Davies-Bouldin评估')
        plt.grid(True)

        plt.tight_layout()
        plt.savefig(f'{RESULTS_DIR}/clustering_metrics.png')
        plt.close()

    def evaluate_clustering_method(self, method, name, data=None):
        """Fit *method* on *data* (default: the scaled data) and score it.

        Returns:
            dict with the method 'name', its 'labels', and a 'scores' dict
            holding the three internal validity indices.
        """
        if data is None:
            data = self.scaled_data

        labels = method.fit_predict(data)

        return {
            'name': name,
            'labels': labels,
            'scores': {
                'silhouette': silhouette_score(data, labels),
                'calinski_harabasz': calinski_harabasz_score(data, labels),
                'davies_bouldin': davies_bouldin_score(data, labels)
            }
        }

    def plot_models_comparison(self, results):
        """Save a grouped bar chart comparing the candidate clusterings."""
        plt.figure(figsize=(12, 6))

        methods = [r['name'] for r in results]
        silhouette_scores = [r['scores']['silhouette'] for r in results]
        calinski_scores = [r['scores']['calinski_harabasz'] for r in results]
        davies_scores = [r['scores']['davies_bouldin'] for r in results]

        # Normalize Calinski-Harabasz: its raw scale dwarfs the other indices.
        calinski_scores = np.array(calinski_scores) / max(calinski_scores)

        x = np.arange(len(methods))
        width = 0.25

        plt.bar(x - width, silhouette_scores, width, label='轮廓系数', color='skyblue')
        plt.bar(x, calinski_scores, width, label='归一化Calinski-Harabasz指数', color='lightgreen')
        plt.bar(x + width, davies_scores, width, label='Davies-Bouldin指数', color='salmon')

        plt.xlabel('聚类方法')
        plt.ylabel('评估分数')
        plt.title('不同聚类方法的性能对比')
        plt.xticks(x, methods, rotation=45)
        plt.legend()
        plt.tight_layout()
        plt.savefig(f'{RESULTS_DIR}/models_comparison.png')
        plt.close()

    def select_best_model(self, results):
        """Return the result with the best combined validity score.

        Silhouette and normalized Calinski-Harabasz are better when larger;
        Davies-Bouldin is better when smaller, so it is subtracted.
        """
        # Hoist the normalization constants out of the loop — the original
        # recomputed both max() scans for every candidate.
        max_calinski = max(r['scores']['calinski_harabasz'] for r in results)
        max_davies = max(r['scores']['davies_bouldin'] for r in results)

        scores = [
            r['scores']['silhouette']
            + r['scores']['calinski_harabasz'] / max_calinski
            - r['scores']['davies_bouldin'] / max_davies
            for r in results
        ]

        best_idx = np.argmax(scores)
        return results[best_idx]

def main():
    """Run the full clustering pipeline and write all output to RESULTS_DIR."""
    analyzer = ProteinAnalyzer('protein.txt')

    # Robustness fix: abort early if the data file could not be read —
    # every later step depends on the loaded data (the original crashed
    # with an AttributeError here instead of exiting cleanly).
    if analyzer.data is None:
        return

    # Basic exploratory plots.
    analyzer.plot_pca_variance_ratio()
    analyzer.plot_correlation_heatmap()

    # Evaluate the plain clustering methods through the common interface.
    methods = {
        'KMeans': KMeans(n_clusters=5, random_state=42),
        'Hierarchical': AgglomerativeClustering(n_clusters=5),
    }
    results = [analyzer.evaluate_clustering_method(method, name)
               for name, method in methods.items()]

    # The ensemble method does not follow the fit_predict interface,
    # so it is evaluated separately.
    ensemble_labels = analyzer.perform_ensemble_clustering(n_clusters=5)
    results.append({
        'name': 'Ensemble',
        'labels': ensemble_labels,
        'scores': {
            'silhouette': silhouette_score(analyzer.scaled_data, ensemble_labels),
            'calinski_harabasz': calinski_harabasz_score(analyzer.scaled_data, ensemble_labels),
            'davies_bouldin': davies_bouldin_score(analyzer.scaled_data, ensemble_labels)
        }
    })

    # Compare all candidates, then keep detailed output only for the winner.
    analyzer.plot_models_comparison(results)
    best_result = analyzer.select_best_model(results)
    print(f"\n最佳聚类方法: {best_result['name']}")

    analyzer.plot_clustering_results(best_result['labels'], best_result['name'])

    # Which features drive the winning partition?
    feature_importance = analyzer.analyze_feature_importance(best_result['labels'])
    print("\n特征重要性排序：")
    print(feature_importance)

    # Plain-text summary report.
    with open(f'{RESULTS_DIR}/analysis_report.txt', 'w', encoding='utf-8') as f:
        f.write("蛋白质数据聚类分析报告\n")
        f.write("=" * 50 + "\n\n")
        f.write(f"分析时间: {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}\n\n")
        f.write(f"最佳聚类方法: {best_result['name']}\n\n")
        f.write("聚类评估指标:\n")
        for metric, value in best_result['scores'].items():
            f.write(f"{metric}: {value:.4f}\n")
        f.write("\n特征重要性排序:\n")
        f.write(feature_importance.to_string())

# Script entry point: run the analysis only when executed directly,
# not when this module is imported.
if __name__ == '__main__':
    main() 