import os
import platform

import matplotlib as mpl
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
from scipy.spatial.distance import cdist
from sklearn.cluster import KMeans, AgglomerativeClustering, DBSCAN
from sklearn.metrics import silhouette_score, calinski_harabasz_score, davies_bouldin_score
from sklearn.model_selection import GridSearchCV
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import scale

# Configure a font with CJK coverage so Chinese labels render correctly.
_FONT_BY_SYSTEM = {
    'Darwin': ['Arial Unicode MS'],   # macOS
    'Windows': ['Microsoft YaHei'],   # Windows
}
# Any other platform (Linux, etc.) falls back to DejaVu Sans.
plt.rcParams['font.family'] = _FONT_BY_SYSTEM.get(platform.system(), ['DejaVu Sans'])

plt.rcParams['axes.unicode_minus'] = False  # render the minus sign as ASCII '-'

# 读取数据
def read_protein_data(filename):
    """Read a tab-separated protein-consumption table.

    filename: path to a TSV file whose first row holds the column names.
    Returns a pandas DataFrame, or None (with an error message printed)
    when the file does not exist.
    """
    try:
        # First row is used as the header by default.
        data = pd.read_csv(filename, sep='\t')
        return data
    except FileNotFoundError:
        # BUG FIX: the message previously printed the literal text
        # "(unknown)" instead of the offending path.
        print(f"Error: {filename} not found")
        return None

# 数据处理和分析
def analyze_protein_data(data):
    """Separate country labels, print summary statistics, and z-score the
    numeric columns.

    data: DataFrame with a 'Country' column plus numeric feature columns.
    Returns (scaled_array, numeric_dataframe, country_series), or
    (None, None, None) when data is None.
    """
    if data is None:
        return None, None, None

    country_labels = data['Country']
    features = data.drop('Country', axis=1)

    # Descriptive statistics of the raw features.
    print("\n描述性统计：")
    print(features.describe())

    # Standardize every feature to zero mean / unit variance.
    standardized = scale(features)

    return standardized, features, country_labels

# 可视化函数 - 箱线图
def plot_boxplots(data, scaled_data, save_path=None):
    """Draw box plots of the raw and the standardized feature values.

    data: DataFrame of raw numeric features; scaled_data: the matching
    z-scored array. When save_path is given, the figures are written to
    '<save_path>_original.png' and '<save_path>_scaled.png'.
    """
    if data is None:
        return

    column_labels = data.columns
    # (values, title, y-label, filename suffix) for each figure.
    panels = [
        (data.values, '原始数据箱线图', 'Value', 'original'),
        (scaled_data, '标准化后的箱线图', 'Z-Score', 'scaled'),
    ]
    for values, title, ylabel, suffix in panels:
        plt.figure(figsize=(10, 6))
        plt.boxplot(values, tick_labels=column_labels)
        plt.title(title)
        plt.ylabel(ylabel)
        plt.xticks(rotation=45)
        plt.tight_layout()
        if save_path:
            plt.savefig(f'{save_path}_{suffix}.png')
        plt.show()

# 模型评估函数
def evaluate_clustering(data, labels):
    """Compute internal cluster-quality indices for one labeling of data.

    Returns a dict with silhouette scores under euclidean and cosine
    distance, plus the Calinski-Harabasz and Davies-Bouldin indices.
    """
    results = {}
    results['silhouette_euclidean'] = silhouette_score(data, labels, metric='euclidean')
    results['silhouette_cosine'] = silhouette_score(data, labels, metric='cosine')
    results['calinski_harabasz'] = calinski_harabasz_score(data, labels)
    results['davies_bouldin'] = davies_bouldin_score(data, labels)
    return results

# KMeans调参和训练
def tune_kmeans(scaled_data, n_clusters_range):
    """Grid-search KMeans over cluster count, init scheme, and restart count.

    Model selection uses the cosine silhouette score. Returns
    (best_params, best_labels, scores_dict) where scores_dict holds one
    entry per n_clusters value, for plotting the evaluation curves.
    """
    print("\nKMeans调参结果：")
    best_score = -1
    best_params = None
    best_labels = None
    scores_dict = {
        'n_clusters': [],
        'silhouette_euclidean': [],
        'silhouette_cosine': [],
        'calinski_harabasz': [],
        'davies_bouldin': [],
        'inertia': []
    }

    for n_clusters in n_clusters_range:
        for init in ['k-means++', 'random']:
            for n_init in [10, 20, 30]:
                kmeans = KMeans(
                    n_clusters=n_clusters,
                    init=init,
                    n_init=n_init,
                    random_state=42
                )
                labels = kmeans.fit_predict(scaled_data)
                scores = evaluate_clustering(scaled_data, labels)

                # Record the metric curves exactly once per n_clusters.
                # BUG FIX: the original condition (n_init == 10 alone) fired
                # for both init values, so every n_clusters was appended
                # twice and the evaluation plots had duplicated x-values.
                if init == 'k-means++' and n_init == 10:
                    scores_dict['n_clusters'].append(n_clusters)
                    scores_dict['silhouette_euclidean'].append(scores['silhouette_euclidean'])
                    scores_dict['silhouette_cosine'].append(scores['silhouette_cosine'])
                    scores_dict['calinski_harabasz'].append(scores['calinski_harabasz'])
                    scores_dict['davies_bouldin'].append(scores['davies_bouldin'])
                    # Negated so that, like the other metrics, higher is better.
                    scores_dict['inertia'].append(-kmeans.inertia_)

                # Cosine silhouette is the primary model-selection criterion.
                if scores['silhouette_cosine'] > best_score:
                    best_score = scores['silhouette_cosine']
                    best_params = {
                        'n_clusters': n_clusters,
                        'init': init,
                        'n_init': n_init
                    }
                    best_labels = labels

                print(f"n_clusters={n_clusters}, init={init}, n_init={n_init}")
                print(f"Silhouette (cosine): {scores['silhouette_cosine']:.3f}")
                print(f"Silhouette (euclidean): {scores['silhouette_euclidean']:.3f}")
                print(f"Calinski-Harabasz: {scores['calinski_harabasz']:.3f}")
                print(f"Davies-Bouldin: {scores['davies_bouldin']:.3f}")
                print("-" * 50)

    print("\n最佳参数：")
    print(best_params)
    print(f"最佳轮廓系数（余弦）：{best_score:.3f}")

    return best_params, best_labels, scores_dict

# 层次聚类
def hierarchical_clustering(scaled_data, n_clusters):
    """Run agglomerative clustering with several linkage criteria and print
    the quality metrics for each."""
    print("\n层次聚类结果：")
    for method in ('ward', 'complete', 'average'):
        model = AgglomerativeClustering(n_clusters=n_clusters, linkage=method)
        assignment = model.fit_predict(scaled_data)
        metrics = evaluate_clustering(scaled_data, assignment)

        print(f"Linkage: {method}")
        print(f"Silhouette (cosine): {metrics['silhouette_cosine']:.3f}")
        print(f"Silhouette (euclidean): {metrics['silhouette_euclidean']:.3f}")
        print(f"Calinski-Harabasz: {metrics['calinski_harabasz']:.3f}")
        print(f"Davies-Bouldin: {metrics['davies_bouldin']:.3f}")
        print("-" * 50)

# DBSCAN聚类
def dbscan_clustering(scaled_data):
    """Sweep DBSCAN over a small eps / min_samples grid and report metrics
    for every parameter pair that produces more than one cluster."""
    print("\nDBSCAN聚类结果：")
    for eps in (0.5, 1.0, 1.5):
        for min_samples in (3, 4, 5):
            assignment = DBSCAN(eps=eps, min_samples=min_samples).fit_predict(scaled_data)

            # DBSCAN marks noise points with label -1.
            label_list = list(assignment)
            noise_count = label_list.count(-1)
            cluster_count = len(set(label_list)) - (1 if -1 in label_list else 0)

            # Quality indices are undefined for fewer than two clusters.
            if cluster_count <= 1:
                continue

            metrics = evaluate_clustering(scaled_data, assignment)
            print(f"eps={eps}, min_samples={min_samples}")
            print(f"聚类数量: {cluster_count}")
            print(f"噪声点数量: {noise_count}")
            print(f"Silhouette (cosine): {metrics['silhouette_cosine']:.3f}")
            print(f"Silhouette (euclidean): {metrics['silhouette_euclidean']:.3f}")
            print(f"Calinski-Harabasz: {metrics['calinski_harabasz']:.3f}")
            print(f"Davies-Bouldin: {metrics['davies_bouldin']:.3f}")
            print("-" * 50)

# 绘制评估指标图
def plot_evaluation_metrics(scores_dict, save_path=None):
    metrics = ['silhouette_euclidean', 'silhouette_cosine', 
              'calinski_harabasz', 'davies_bouldin', 'inertia']
    
    fig, axes = plt.subplots(3, 2, figsize=(15, 18))
    axes = axes.ravel()
    
    for i, metric in enumerate(metrics):
        if i < len(axes):
            axes[i].plot(scores_dict['n_clusters'], scores_dict[metric], 'bo-')
            axes[i].set_xlabel('聚类数量')
            axes[i].set_ylabel(metric.replace('_', ' ').title())
            axes[i].grid(True)
    
    plt.tight_layout()
    if save_path:
        plt.savefig(f'{save_path}_metrics.png')
    plt.show()

# 绘制聚类结果
def plot_clustering_results(scaled_data, numeric_data, countries, n_clusters, save_path=None):
    """Fit KMeans with n_clusters and visualize the resulting partition.

    Produces a two-feature scatter plot, a country/cluster table (printed
    and optionally saved as CSV), per-cluster box plots, and a pair plot
    colored by cluster. Figures are saved under '<save_path>_*' when
    save_path is given.
    """
    kmeans = KMeans(n_clusters=n_clusters, random_state=42)
    cluster_labels = kmeans.fit_predict(scaled_data)

    # Scatter plot of the first two standardized features.
    # BUG FIX: the axes were labeled 第一/第二主成分 ("principal components")
    # although no PCA is performed anywhere in this script — the plot shows
    # the first two z-scored input features, so label them by column name.
    plt.figure(figsize=(8, 6))
    scatter = plt.scatter(scaled_data[:, 0], scaled_data[:, 1],
                          c=cluster_labels, cmap='viridis')
    plt.xlabel(f'{numeric_data.columns[0]}（标准化）')
    plt.ylabel(f'{numeric_data.columns[1]}（标准化）')
    plt.title(f'K-means聚类结果 (k={n_clusters})')
    plt.colorbar(scatter)
    if save_path:
        plt.savefig(f'{save_path}_scatter.png')
    plt.show()

    # Table mapping each country to its assigned cluster.
    result_df = pd.DataFrame({
        'Country': countries,
        'Cluster': cluster_labels
    })
    print("\n聚类结果：")
    print(result_df)
    if save_path:
        result_df.to_csv(f'{save_path}_results.csv', index=False)

    # Per-cluster box plots, clusters offset side by side for each feature.
    plt.figure(figsize=(12, 6))
    for i in range(n_clusters):
        cluster_data = numeric_data[cluster_labels == i]
        plt.boxplot(cluster_data.values,
                    positions=np.array(range(len(numeric_data.columns))) * 2.0 + i * 0.6,
                    tick_labels=numeric_data.columns,
                    widths=0.4)
    plt.title('各聚类的箱线图')
    plt.xticks(rotation=45)
    plt.tight_layout()
    if save_path:
        plt.savefig(f'{save_path}_cluster_boxplots.png')
    plt.show()

    # Scatter-plot matrix colored by cluster membership.
    df_with_clusters = numeric_data.copy()
    df_with_clusters['Cluster'] = cluster_labels
    sns.pairplot(df_with_clusters, hue='Cluster', diag_kind='hist')
    if save_path:
        plt.savefig(f'{save_path}_pairplot.png')
    plt.show()

def main():
    """End-to-end pipeline: load, summarize, tune, cluster, and plot."""
    raw = read_protein_data('protein.txt')

    # Preprocessing: split labels from features and standardize.
    scaled, features, countries = analyze_protein_data(raw)
    if scaled is None:
        return

    # Directory that receives every generated figure.
    os.makedirs('plots', exist_ok=True)

    # Box plots of raw vs. standardized data.
    plot_boxplots(features, scaled, 'plots/boxplots')

    # KMeans hyperparameter search over k = 2..20.
    best_params, best_labels, curves = tune_kmeans(scaled, range(2, 21))

    # Evaluation-metric curves from the search.
    plot_evaluation_metrics(curves, 'plots/evaluation')

    # Visualize the best KMeans partition.
    plot_clustering_results(scaled, features, countries,
                            best_params['n_clusters'], 'plots/clustering')

    # Alternative clustering algorithms for comparison.
    hierarchical_clustering(scaled, best_params['n_clusters'])
    dbscan_clustering(scaled)

if __name__ == "__main__":
    # BUG FIX: `import os` used to live only inside this guard, so main()
    # (which calls os.makedirs) raised NameError when imported from another
    # module; `os` is now imported at module level. The redundant makedirs
    # here is dropped too — main() creates the plots/ directory itself.
    main()