import numpy as np
import pandas as pd
from sklearn.cluster import KMeans
import matplotlib.pyplot as mp


# 读取数据
def get_data_zs(inputfile):
    """Load the survey workbook and return the feature data plus its z-scores.

    Parameters
    ----------
    inputfile : str
        Path of the Excel workbook to read.

    Returns
    -------
    (DataFrame, DataFrame)
        ``data``    -- the selected feature columns,
        ``data_zs`` -- the same columns z-score standardized.
    """
    # Bug fix: the original read a hard-coded absolute path and silently
    # ignored `inputfile`; the parameter is now honored.
    df = pd.read_excel(inputfile)
    # Drop identifier / free-text columns before imputation.
    cols = [i for i in df.columns if i not in ['性别', '专业', '序号', 'Unnamed: 36', '测验工具', '测验用时', '院系']]
    # Impute missing values with the column means.
    # NOTE(review): on pandas >= 2.0 df.mean() raises for non-numeric
    # columns; may need df.mean(numeric_only=True) — confirm pandas version.
    df = df[cols].fillna(df.mean())
    df2 = df.replace('男', 0).replace('女', 1)  # encode gender: male=0, female=1
    # Encode each distinct enrollment year as a small integer code.
    for code, year in enumerate(set(np.array(df['入学年份']))):
        df2 = df2.replace(year, code)
    # Keep only the feature columns used for clustering (drop totals/ids).
    cols = [i for i in df.columns if
            i not in ['总分', '总均分', '阳性项目数', '序号', 'Unnamed: 36', 'Unnamed: 0', '测验工具', '测验用时', '院系']]
    data = df2[cols]
    # z-score standardization: center (subtract mean) then scale by std.
    data_zs = 1.0 * (data - data.mean()) / data.std()
    return data, data_zs


def model_data_zs(data, k, b, data_zs=None):
    """Cluster the standardized data with KMeans.

    Parameters
    ----------
    data : DataFrame
        Feature columns; supplies the index and column labels of the result.
    k : int
        Number of clusters.
    b : int
        Maximum number of KMeans iterations.
    data_zs : DataFrame, optional
        Pre-standardized features. When omitted they are recomputed from
        ``data`` with the same z-score formula used by get_data_zs.
        (Bug fix: the original read a module-level global ``data_zs``,
        which made the function unusable outside the script's main block.)

    Returns
    -------
    (KMeans, DataFrame, int)
        The fitted model, a frame of standardized features plus a
        '聚类类别' (cluster label) column, and ``k`` unchanged.
    """
    if data_zs is None:
        data_zs = 1.0 * (data - data.mean()) / data.std()
    # n_jobs was deprecated in scikit-learn 0.23 and removed in 1.0, so it
    # is no longer passed; random_state keeps the clustering reproducible.
    model = KMeans(n_clusters=k, random_state=7, max_iter=b)
    model.fit(data_zs)
    # Attach each sample's cluster label to its standardized features.
    r = pd.concat(
        [data_zs, pd.Series(model.labels_, index=data.index)], axis=1)
    r.columns = list(data.columns) + [u'聚类类别']  # rename the header
    return model, r, k


def make_norm(model, k, clustered=None):
    """Compute each sample's relative distance to its cluster centre.

    Parameters
    ----------
    model : fitted KMeans
        Only ``model.cluster_centers_`` is read.
    k : int
        Number of clusters.
    clustered : DataFrame, optional
        Standardized features plus a '聚类类别' label column (the ``r``
        returned by model_data_zs). Falls back to the module-level global
        ``r`` when omitted, matching the original behavior.

    Returns
    -------
    Series
        Per-sample distance to the cluster centre, divided by the median
        distance within that cluster (relative distance).
    """
    # Bug fix: the original re-read the whole Excel workbook from a
    # hard-coded path just to rebuild the feature-column list. Those
    # columns are exactly the non-label columns of the clustered frame,
    # so derive them directly and skip the file I/O.
    r_df = r if clustered is None else clustered
    cols = [c for c in r_df.columns if c != u'聚类类别']
    norm = []
    for i in range(k):
        # Absolute Euclidean distance of cluster-i members to centre i.
        norm_tmp = r_df[cols][
                       r_df[u'聚类类别'] == i] - model.cluster_centers_[i]
        norm_tmp = norm_tmp.apply(np.linalg.norm, axis=1)
        # Relative distance: scale by the cluster's median distance.
        norm.append(norm_tmp / norm_tmp.median())
    norm = pd.concat(norm)
    return norm


def draw_discrete_point(threshold, distances=None):
    """Plot relative distances, highlighting and labelling outliers.

    Parameters
    ----------
    threshold : float
        Samples with relative distance above this value are outliers.
    distances : Series, optional
        Relative distances (output of make_norm). Falls back to the
        module-level global ``norm`` when omitted, matching the original
        behavior.

    Side effects: saves the figure to '离散点分析.png' and shows it.
    """
    series = norm if distances is None else distances
    mp.rcParams['font.sans-serif'] = ['SimHei']  # allow CJK glyphs in labels
    mp.rcParams['axes.unicode_minus'] = False
    series[series <= threshold].plot(style='go')  # normal points (green)

    discrete_points = series[series > threshold]  # outliers above threshold
    discrete_points.plot(style='rs')  # outliers (red squares)

    # Annotate each outlier with its index and value.
    # (Also fixes the original's shadowing of the builtin `id`.)
    for idx, val in discrete_points.items():
        mp.annotate('(%s,%0.2f)' % (idx, val), xy=(idx, val), xytext=(idx, val))
    mp.xlabel(r'编号')
    mp.ylabel(r'相对距离')
    mp.savefig('离散点分析.png')
    mp.show()


if __name__ == '__main__':
    # NOTE(review): `inputfile` is passed to get_data_zs, but that function
    # currently reads a hard-coded absolute path and ignores the argument —
    # confirm which file is actually intended.
    inputfile = 'data/consumption_data.xls'
    threshold = 2.3  # relative-distance cutoff: samples above it are outliers
    k = 4  # number of clusters
    b = 500  # maximum KMeans iterations
    # The calls below are order-dependent: as written, the functions
    # communicate through module-level globals (`model_data_zs` reads
    # `data_zs`, `make_norm` reads `r`, `draw_discrete_point` reads `norm`),
    # so none of them may be reordered or skipped.
    data, data_zs = get_data_zs(inputfile)
    model, r, k = model_data_zs(data, k, b)
    norm = make_norm(model, k)
    draw_discrete_point(threshold)
    print('All Done')
