from tensorflow.python.client import device_lib
# print(device_lib.list_local_devices())
# import numpy as np
# print(np.version)
# import tensorflow as tf
# tf.test.is_gpu_available()
# import random
#
#
# li = [x for x in range(100)]
# m = []
# for i in range(10):
#     a = random.randint(0,100)
#     m.append(a)
#     li.pop(a)
# print(li)
# print(m)
import pandas as pd
import numpy as np
from sklearn import preprocessing
from sklearn.cluster import AgglomerativeClustering
from numpy import mean
# Load the raw dataset; columns are addressed positionally below.
data = pd.read_excel('./data/cnew/data.xlsx')
print(data.head())
data = np.array(data)

# data_list = data.tolist()
# Feature matrix: columns 11..17 (7 features) — assumes the sheet layout
# puts the model inputs there; TODO confirm against the Excel schema.
X = data[:,11:18]
# Column 2 — presumably the target/score value aggregated later; verify.
y = data[:,2]
leng = len(X)  # number of samples (not referenced elsewhere in this file)
data_list = data.tolist()  # row-wise list form, used for label aggregation below


# print(X)
# print(y)
# Data normalization (standardization) preprocessing
from sklearn.preprocessing import StandardScaler
# X_std = StandardScaler().fit_transform(X)
# #构造协方差矩阵
# mean_vec = np.mean(X_std, axis=0)
# cov_mat = (X_std - mean_vec).T.dot((X_std - mean_vec)) / (X_std.shape[0]-1)
# print(cov_mat)
# #计算特征值和特征向量
# cov_mat = np.cov(X_std.T)
# eig_vals, eig_vecs = np.linalg.eig(cov_mat)
# #把特征向量和特征值对应起来，为计算特征值权重。
#
# eig_pairs = [(np.abs(eig_vals[i]), eig_vecs[:,i]) for i in range(len(eig_vals))]
# #按特征值排序，注意与原始数据无关
# eig_pairs.sort(key=lambda x: x[0], reverse=True)
# #对每个特征值对应的特征向量求累加和，分析特征值的权重
#
# tot = sum(eig_vals)
# var_exp = [(i / tot)*100 for i in sorted(eig_vals, reverse=True)]
# print (var_exp)
# cum_var_exp = np.cumsum(var_exp)
# matrix_w = np.hstack((eig_pairs[0][1].reshape(7,1),
# eig_pairs[1][1].reshape(7,1)))
# print('Matrix W:\n', matrix_w)
# Y = X_std.dot(matrix_w)
# print(Y)
# print(Y.shape)


def deal(X, n_components=2):
    """Standardize X and project it onto its leading principal components (PCA).

    Parameters
    ----------
    X : array-like of shape (n_samples, n_features)
        Raw feature matrix.
    n_components : int, default 2
        Number of principal components to keep (backward-compatible: the
        previous implementation always kept 2).

    Returns
    -------
    ndarray of shape (n_samples, n_components)
        The projected data.
    """
    X = np.asarray(X, dtype=float)
    # Standardize each feature to zero mean / unit population variance —
    # numerically identical to StandardScaler().fit_transform(X), including
    # sklearn's convention of leaving zero-variance features unscaled.
    std = X.std(axis=0)
    std[std == 0] = 1.0
    X_std = (X - X.mean(axis=0)) / std
    # Sample covariance (ddof=1). Computed ONCE — the old code built it
    # manually and then immediately overwrote it with np.cov.
    cov_mat = np.cov(X_std.T)
    print(cov_mat)
    # eigh: the covariance matrix is symmetric, so eigenvalues are
    # guaranteed real (np.linalg.eig could return complex values).
    eig_vals, eig_vecs = np.linalg.eigh(cov_mat)
    # Reorder by descending explained variance.
    order = np.argsort(eig_vals)[::-1]
    eig_vals = eig_vals[order]
    eig_vecs = eig_vecs[:, order]
    # Percentage of total variance explained by each component.
    var_exp = (eig_vals / eig_vals.sum() * 100).tolist()
    print(var_exp)
    # Projection matrix: top eigenvectors as columns. Generalizes the old
    # hard-coded reshape(7, 1), which only worked for exactly 7 features.
    matrix_w = eig_vecs[:, :n_components]
    print('Matrix W:\n', matrix_w)
    Y = X_std.dot(matrix_w)
    print(Y)
    print(Y.shape)
    return Y
# Candidate sample (7 feature values) intended to be located via clustering.
mm = [51,28,51,10,1,52,13]
data_M = X.tolist()
data_M.append(mm)
data_M = np.array(data_M)
# NOTE(review): the array built above (X plus the appended mm row) is
# immediately discarded by this assignment — deal(X) never sees mm.
# Possibly deal(data_M) was intended; kept as-is to preserve behavior.
# TODO confirm with the author.
data_M = deal(X)
ac = AgglomerativeClustering(n_clusters=300, linkage='ward')
# fit_predict() fits the estimator itself, so the previous separate
# ac.fit(data_M) call clustered the same data twice for no benefit.
labels = ac.fit_predict(data_M)
print(labels)

# Maps cluster label -> list of column-2 values of that cluster's members.
dic = {}

# NOTE(review): len(labels)-1 means the LAST sample is never added to dic —
# off-by-one, unless deliberately excluding an appended probe row. The probe
# (mm) was never actually clustered (deal(X) ignored it), so this exclusion
# drops a real data row. TODO confirm intent.
length = len(labels)-1
for i in range(length):
    if labels[i] in dic:
        dic[labels[i]].append(data_list[i][2])
    else:
        dic[labels[i]] = []
        dic[labels[i]].append(data_list[i][2])

print(dic)
# Mean of the column-2 values in the cluster containing sample length-1
# (i.e. the second-to-last original row).
l_num = mean(dic[labels[length-1]])
print(l_num)
print(dic[labels[length-1]])
mmin = 100  # running min (assumes column-2 values stay below 100 — verify)
mmax = 0    # running max (assumes column-2 values are non-negative — verify)
rendata = []
# NOTE(review): `i` here iterates the stored column-2 VALUES, but is then
# used as a row INDEX into data_list — almost certainly a bug unless
# column 2 happens to hold valid row indices. Behavior preserved; confirm.
for i in dic[labels[length-1]]:
    rendata.append(data_list[i][2])
    if data_list[i][2]>mmax:
        mmax = data_list[i][2]
    if data_list[i][2]<mmin:
        mmin = data_list[i][2]

print(mmin,'-',mmax)
print(rendata)