import time
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
from scipy.cluster.hierarchy import dendrogram, linkage, fcluster
from sklearn.metrics import silhouette_score
from sklearn.preprocessing import StandardScaler
from sklearn.manifold import TSNE
import matplotlib.pyplot as plt
from matplotlib import rcParams

import matplotlib.pyplot as plt
from matplotlib import rcParams

# Configure matplotlib so Chinese labels render correctly.
# (rcParams is the same object as plt.rcParams.)
rcParams['font.sans-serif'] = ['SimHei']  # SimHei provides CJK glyphs
rcParams['axes.unicode_minus'] = False    # render the minus sign with CJK fonts
rcParams['font.size'] = 12                # base font size for all plots

# ---- 1. Load the raw dataset ----
data = pd.read_csv('../StudentPerformanceFactors.csv')
print(f"原始数据量: {len(data)} 条")

# Drop every row containing at least one missing value.
data = data.dropna()
print(f"去除空值后的数据量: {len(data)} 条")

# ---- 2. Encode categorical columns as ordinals ----
# Binary yes/no columns.
binary_cols = {
    'Extracurricular_Activities': {'No': 0, 'Yes': 1},
    'Internet_Access': {'No': 0, 'Yes': 1},
    'Learning_Disabilities': {'No': 0, 'Yes': 1},
}

# Multi-level ordinal / nominal columns.
multi_cols = {
    'Parental_Involvement': {'Low': 0, 'Medium': 1, 'High': 2},
    'Access_to_Resources': {'Low': 0, 'Medium': 1, 'High': 2},
    'Motivation_Level': {'Low': 0, 'Medium': 1, 'High': 2},
    'Family_Income': {'Low': 0, 'Medium': 1, 'High': 2},
    'Teacher_Quality': {'Low': 0, 'Medium': 1, 'High': 2},
    'School_Type': {'Public': 0, 'Private': 1},
    'Peer_Influence': {'Negative': 0, 'Neutral': 1, 'Positive': 2},
    'Parental_Education_Level': {'High School': 0, 'College': 1, 'Postgraduate': 2},
    'Distance_from_Home': {'Near': 0, 'Moderate': 1, 'Far': 2},
    'Gender': {'Male': 0, 'Female': 1},
}

# Apply every mapping column by column (values absent from a mapping
# would become NaN, but all rows with NaN were already dropped above).
for column, value_map in {**binary_cols, **multi_cols}.items():
    data[column] = data[column].map(value_map)

# ---- 3. Select feature columns ----
# Columns below were manually excluded after experiments showed they did
# not help the clustering.  The commented-out cat_cols list in the original
# file enumerated every available column.
drop_cols = ['Parental_Involvement', 'Extracurricular_Activities',
             'Attendance', 'Internet_Access', 'Family_Income',
             'Distance_from_Home', 'Physical_Activity',
             'Access_to_Resources', 'School_Type', 'Peer_Influence',
             'Gender', 'Sleep_Hours', 'Tutoring_Sessions']
features = data.drop(columns=drop_cols)
print(f"用于聚类的特征维度: {features.shape[1]}")
print("使用的特征列:", features.columns.tolist())

# ---- 4. Standardize to zero mean / unit variance before clustering ----
scaler = StandardScaler()
features_scaled = scaler.fit_transform(features)

#---------------------------------------------层次聚类算法------------------------------------------------------#
def hierarchical_clustering(data, k=None, method='ward', metric='euclidean'):
    """Run agglomerative (hierarchical) clustering.

    Parameters:
        data: standardized feature matrix, shape (n_samples, n_features)
        k: number of flat clusters to extract; when None only the linkage
           matrix is computed
        method: linkage strategy ('ward', 'complete', 'average', 'single')
        metric: distance metric ('euclidean', 'cosine', 'cityblock', ...)

    Returns:
        (labels, Z) — labels is None when k is None; Z is the scipy
        linkage matrix describing the full merge tree.
    """
    print(f"\n正在执行层次聚类(method={method}, metric={metric})...")
    t0 = time.time()

    # Build the full merge tree.
    Z = linkage(data, method=method, metric=metric)
    print(f"链接矩阵计算完成，用时: {time.time()-t0:.2f}秒")

    if k is None:
        return None, Z
    # Cut the tree so that at most k flat clusters remain.
    return fcluster(Z, t=k, criterion='maxclust'), Z

def plot_dendrogram(Z, k=None, title=None, method=None):
    """Plot the dendrogram for a linkage matrix.

    Parameters:
        Z: linkage matrix from scipy.cluster.hierarchy.linkage
        k: when given, truncate the plot to the last k merges and draw a
           horizontal cut line at the merge height that produced k clusters
        title: custom plot title; a default is generated when omitted
        method: linkage method name used only in the default title.
            BUGFIX: the original read a free variable `method` that only
            existed as a global under `__main__`, raising NameError when
            the default title was needed anywhere else.
    """
    plt.figure(figsize=(12, 6))
    if k is not None:
        dendrogram(Z,
                  truncate_mode='lastp',
                  p=k,
                  show_leaf_counts=True,
                  leaf_rotation=90,
                  leaf_font_size=12,
                  show_contracted=True)
        # Z[-k, 2] is the merge distance at which k clusters were formed;
        # guard the index so an out-of-range k cannot crash the plot.
        if 1 <= k <= len(Z):
            plt.axhline(y=Z[-k, 2], color='r', linestyle='--')
    else:
        dendrogram(Z)

    if title is None:
        # Only mention the linkage method when the caller supplied it.
        title = f'层次聚类树状图 (method={method})' if method else '层次聚类树状图'
    plt.title(title)
    plt.xlabel('样本索引')
    plt.ylabel('距离')
    plt.grid(False)
    plt.tight_layout()
    plt.show()

def visualize_hierarchical_clusters(data, labels, k, perplexity=30, random_state=42):
    """
    Visualize hierarchical clustering results in 2-D via t-SNE.

    Parameters:
        data: standardized feature matrix, shape (n_samples, n_features)
        labels: cluster labels, shape (n_samples,); fcluster labels are 1-based
        k: number of clusters
        perplexity: t-SNE perplexity (recommended 5-50; scikit-learn requires
            it to be smaller than the number of samples)
        random_state: random seed for a reproducible embedding
    """
    print("\n正在执行t-SNE降维...")
    start_time = time.time()

    # Project to 2-D with t-SNE.  BUGFIX: the explicit `n_iter=1000` argument
    # was removed — scikit-learn renamed it to `max_iter` (deprecated in 1.5,
    # removed in 1.7) and the default iteration count is already 1000, so
    # omitting it keeps the result identical while working on all versions.
    tsne = TSNE(n_components=2,
                perplexity=perplexity,
                random_state=random_state,
                init='pca',
                learning_rate='auto')

    data_2d = tsne.fit_transform(data)
    print(f"t-SNE降维完成，用时: {time.time()-start_time:.2f}秒")
    print(f"最终KL散度: {tsne.kl_divergence_:.3f} (越小表示降维效果越好)")

    # One distinct color per cluster (plt.colormaps[...].resampled needs
    # matplotlib >= 3.6, which the original already relied on).
    colors = plt.colormaps['tab20'].resampled(k)
    plt.figure(figsize=(12, 8))

    # Scatter each cluster separately so the legend can show per-cluster sizes.
    for i in range(1, k+1):  # hierarchical labels start at 1
        cluster_points = data_2d[labels == i]
        plt.scatter(cluster_points[:, 0], cluster_points[:, 1],
                   color=colors(i-1),
                   label=f'簇 {i} (n={len(cluster_points)})',
                   alpha=0.7,
                   edgecolors='w',
                   s=60,
                   linewidths=0.5)

    # Titles and axis labels.
    plt.title(f'层次聚类结果 (t-SNE降维)\n困惑度={perplexity}', pad=20)
    plt.xlabel('t-SNE维度1')
    plt.ylabel('t-SNE维度2')

    # Legend with a semi-opaque white background for readability.
    leg = plt.legend(loc='best', frameon=True)
    leg.get_frame().set_facecolor('white')
    leg.get_frame().set_alpha(0.8)

    plt.grid(True, linestyle='--', alpha=0.2)
    plt.tight_layout()

    # Footer with quality metrics.  Note the silhouette is computed on the
    # ORIGINAL high-dimensional data, not on the 2-D embedding.
    silhouette = silhouette_score(data, labels)
    plt.figtext(0.5, 0.01,
               f"轮廓系数: {silhouette:.3f} | KL散度: {tsne.kl_divergence_:.3f}",
               ha="center",
               fontsize=10,
               bbox={"facecolor":"white", "alpha":0.7, "pad":5})

    plt.show()
if __name__ == "__main__":
    method = 'ward'       # linkage strategy
    metric = 'euclidean'  # distance metric

    # Build the merge tree once; every candidate k is cut from the same Z.
    _, Z = hierarchical_clustering(features_scaled, k=None, method=method, metric=metric)

    # Evaluate candidate cluster counts with the silhouette coefficient.
    k_range = range(2, min(11, len(features_scaled)))  # guard against tiny samples
    silhouette_scores = []
    print("\n正在评估不同K值对应的Silhouette分数:")
    for k in k_range:
        cluster_labels = fcluster(Z, t=k, criterion='maxclust')
        if len(set(cluster_labels)) <= 1:
            # Silhouette is undefined when everything lands in one cluster;
            # record a sentinel so this k can never win the argmax below.
            silhouette_scores.append(-1)
            print(f"k = {k} -> 无法计算有效轮廓系数（类别数不足）")
        else:
            score = silhouette_score(features_scaled, cluster_labels)
            silhouette_scores.append(score)
            print(f"k = {k} -> silhouette_score = {score:.4f}")

    # Pick the k with the highest silhouette (first one wins on ties,
    # matching np.argmax semantics).
    best_k_index, best_score = max(enumerate(silhouette_scores), key=lambda item: item[1])
    best_k = k_range[best_k_index]
    print(f"\n✅ 最优K值为 {best_k}，对应的Silhouette Score为 {best_score:.4f}")

    # Final clustering at the chosen k, plus its quality score.
    cluster_labels = fcluster(Z, t=best_k, criterion='maxclust')
    silhouette = silhouette_score(features_scaled, cluster_labels)
    print(f"\n最终轮廓系数: {silhouette:.3f}")

    # 1. Dendrogram truncated to the chosen number of clusters.
    plot_dendrogram(Z, k=best_k, title=f'层次聚类树状图 (k={best_k}, method={method})')

    # 2. 2-D scatter of the final clustering.
    visualize_hierarchical_clusters(features_scaled, cluster_labels, best_k)