import matplotlib.pyplot as plt
from sklearn.cluster import KMeans
import pandas as pd
import numpy as np
from sklearn.decomposition import PCA
from sklearn.metrics import silhouette_score


# Compute the between_SS / total_SS ratio for a clustering result.
def sum_of_square_scores(original_data, predict_labels, cluster_centers, n_clusters):
    """Return between_SS / total_SS, the fraction of total variance explained.

    total_SS  = sum of squared distances of every sample to the global mean.
    within_SS = sum of squared distances of every sample to its assigned
                cluster center.
    between_SS = total_SS - within_SS; the returned ratio lies in [0, 1]
    and is higher for tighter, better-separated clusters.

    Parameters
    ----------
    original_data : array-like, shape (n_samples, n_features)
    predict_labels : array-like of int, shape (n_samples,)
        Cluster index assigned to each sample.
    cluster_centers : array-like, shape (n_clusters, n_features)
    n_clusters : int
        Retained for backward compatibility with existing callers; the
        vectorized computation below does not need it.
    """
    data = np.asarray(original_data, dtype=float)
    labels = np.asarray(predict_labels)
    centers = np.asarray(cluster_centers, dtype=float)
    total_ss = np.sum((data - data.mean(axis=0)) ** 2)
    # centers[labels] broadcasts each sample's own center; this replaces the
    # original per-row Python loop with one vectorized expression.
    within_ss = np.sum((data - centers[labels]) ** 2)
    return (total_ss - within_ss) / total_ss


# K-means clustering script: reads the student spreadsheet, encodes the
# categorical columns, discretizes one feature, reduces to 2 PCA components,
# clusters with k-means, and reports two cluster-quality scores. The scatter
# plot is saved in the directory containing this .py file.
pd.set_option('display.max_columns', None)  # show all columns when printing frames
df = pd.read_excel(r'C:\Users\liutong\Documents\Tencent Files\707921012\FileRecv\整合学生.xlsx')  # input file path
# Keep every column except the identifier / metadata ones.
cols = [i for i in df.columns if i not in ['性别', '专业', '序号', 'Unnamed: 36', '测验工具', '测验用时', '院系']]
# Fill missing values with the column mean. numeric_only=True avoids the
# TypeError that plain df.mean() raises on object columns in modern pandas.
df = df[cols].fillna(df.mean(numeric_only=True))
df2 = df.replace('男', 0).replace('女', 1)  # encode gender: male -> 0, female -> 1
# Encode enrollment year as consecutive integer codes. The original looped
# df2.replace(year, code) over the WHOLE frame, which could also clobber
# identical values in unrelated columns; factorize touches only this column.
df2['入学年份'] = pd.factorize(df2['入学年份'])[0]
cols = [i for i in df.columns if
        i not in ['总分', '总均分', '阳性项目数', '序号', 'Unnamed: 36', 'Unnamed: 0', '测验工具', '测验用时', '院系']]
df2 = df2[cols]  # keep only the feature columns
npdata = np.array(df2)  # DataFrame -> numpy matrix
# Discretize the 5th-from-last column against its COLUMN MEAN:
# below mean -> 0, equal -> 1, above -> 2.
# BUGFIX: the original read npdata[12][-5] (the value of an arbitrary row)
# although its comment said "store the mean"; this computes the actual mean.
threshold_value = np.mean(npdata[:, -5])
npdata[:, -5] = np.select(
    [npdata[:, -5] < threshold_value, npdata[:, -5] == threshold_value],
    [0, 1],
    default=2,
)

pca = PCA(n_components=2)  # project the samples onto 2 principal components
reduced_x = pca.fit_transform(npdata)
list0 = np.round(reduced_x, 3)  # keep three decimals (vectorized, no Python loop)
k = 3  # number of clusters
iteration = 500  # maximum k-means iterations
# Fit ONCE and reuse the labels. The original fitted two independent KMeans
# models (each with random init), so the plotted colors and the evaluated
# labels could disagree. The n_jobs argument was removed in scikit-learn 1.0.
kmeans = KMeans(n_clusters=k, max_iter=iteration).fit(list0)
y_pred = kmeans.labels_  # cluster index per sample, used for the point colors
plt.scatter(list0[:, 0], list0[:, 1], c=y_pred, s=10)  # x, y, color, marker size
plt.savefig('聚类散点.png')  # saved next to this .py file
plt.show()  # display the scatter plot
# Silhouette Coefficient: range [-1, 1], higher means better-separated clusters.
sil_coeff = silhouette_score(list0, kmeans.labels_, metric='euclidean')
# between_SS / total_SS: fraction of total variance explained by the clustering.
bss = sum_of_square_scores(list0, kmeans.labels_, kmeans.cluster_centers_, kmeans.n_clusters)
print("coeff:" + str(sil_coeff))  # print the Silhouette Coefficient
print("between_ss / total_ss:" + str(bss))  # print the between_SS / total_SS ratio
