import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from matplotlib.pyplot import cm
from matplotlib.offsetbox import OffsetImage, AnnotationBbox
import os
from PIL import Image
from sklearn.manifold import TSNE
from sklearn.decomposition import PCA
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis as LDA
from sklearn.manifold import LocallyLinearEmbedding
from sklearn.manifold import MDS
from sklearn.datasets import load_digits
from scipy.linalg import svd
from sklearn import datasets
from sklearn.cluster import AgglomerativeClustering
from sklearn.metrics import confusion_matrix
from sklearn.cluster import Birch
from sklearn.cluster import DBSCAN
from sklearn.preprocessing import StandardScaler
import csv
from sklearn.cluster import KMeans
# Base directories/files for the BraTS image pipeline.
# Built with os.path.join (instead of hard-coded "\\" separators) so the
# script runs on any OS; a trailing separator is kept on directory paths
# because the commented sections below concatenate file names directly
# (e.g. `in_path + file_name`).
path = os.path.join(".", "data", "")                        # root data directory
in_path = os.path.join(path, "Brats", "")                   # original input images
small_path = os.path.join(path, "smallBrats", "")           # downscaled thumbnails
csv_path = os.path.join(path, "Brats_test.csv")             # flattened-pixel CSV
output_image_path = os.path.join(path, "middleBrats", "")   # resized (80x80) images
# # #------------------------缩小图像-------------------------------------#
# for root, dirs, files in os.walk(in_path):
#         for file_name in files:
#             print(file_name)
#             im = Image.open(in_path+file_name)
#             resized_image = im.resize((80,80))
#             resized_image.save(output_image_path+file_name)
# #------------------------生成csv文件-------------------------------------#
# if not os.path.exists(csv_path):
#  with open(csv_path,'a', newline='') as csvfile:
#     for root, dirs, files in os.walk(output_image_path):#in_path  output_image_path
#             for file_name in files:
#                 # print(file_name)
#                 im = Image.open(output_image_path+file_name)
#                 data=np.array(im).flatten()
#                 # newdata=np.resize(np.array(im),(40,40))
#                 # data=newdata.flatten()
#                 writer = csv.writer(csvfile)
#                 writer.writerow(data)
# #------------------------由csv文件生成图片-------------------------------------#
# Y = np.loadtxt(path + "Brats_test1.csv", dtype=int, delimiter=",")
# (n, m) = Y.shape
# X = 255 * np.ones(Y.shape) - Y  # 黑白颠倒
# picture_path1 = path + "ori_pictures\\"
# picture_path2 = path + "new_pictures\\"
# if not os.path.exists(picture_path1):
#     os.makedirs(picture_path1)
# if not os.path.exists(picture_path2):
#     os.makedirs(picture_path2)
# for i in range(0, n): #生成原图
#     data = np.reshape(Y[i, 1:m], (80, 80))
#     data=data[15:65,15:65]
#     im = Image.fromarray(data.astype(np.uint8)) #00000000~11111111 == 0~255 (8-bit grayscale)
#     im.save(picture_path1 + "im_"+str(i) + ".png")
#     if i % 100 == 0:
#         print(i)
# for i in range(0, n): #生成黑白颠倒图
#     data = np.reshape(X[i, 1:m], (80, 80))
#     data = data[15:65, 15:65]
#     im = Image.fromarray(data.astype(np.uint8)) #00000000~11111111 == 0~255 (8-bit grayscale)
#     im.save(picture_path2 + "im_"+str(i) + ".png")

# # #------------------------生成小图片-------------------------------------#
# picture_path2 = path + "new_pictures\\"
# eta=0.4
# for root, dirs, files in os.walk(picture_path2):
#         for file_name in files:
#             print(file_name)
#             im = Image.open(picture_path2+file_name)
#             (x, y) = im.size
#             s_img = im.resize((int(x*eta), int(y*eta)), Image.LANCZOS) #抗锯齿
#             s_img.save(small_path+file_name)
# #----------------------------------降维1.pca降维--------------------------------------------#
# X = np.loadtxt(path+"Brats_test1.csv", dtype=int, delimiter=",") # 导入数据
# (n, m) = X.shape # 获取数据信息
# pca = PCA(n_components=50) # 从原始数据中保留前50个特征
# pca_y = pca.fit_transform(X)
# t_sne = TSNE(n_components=2) # 将高维数据映射到低维空间
# Y = t_sne.fit_transform(pca_y)  # 用t-SNE得到最后的二维坐标
# np.savetxt(path+"y.csv", Y, fmt="%f", delimiter=",")
#
# pca=PCA(n_components=2) # 用pca降为两维（保留2个特征），作为横纵坐标用于后面的聚类，以便跟t-SNE方法对比
# pca_result=pca.fit_transform(X)
# pca1=pd.DataFrame(pca_result)
# pca1.to_csv(path+'PCA_for_km.csv')#将降维后的数据输出到csv，所用数据去除了降维数据的行标和列标。

# # #----------------------------------降维2.lle局部线性嵌入降维--------------------------------------------#
# X = np.loadtxt(path+"Brats_test1.csv", dtype=int, delimiter=",") # 导入数据
# # 二维降维
# lle = LocallyLinearEmbedding(n_neighbors=5,n_components=2)# 邻域大小为5，降维后的目标维度为2
# lle_result = lle.fit_transform(X)
# lle1 = pd.DataFrame(lle_result)
# lle1.to_csv(path+'LLE_for_km.csv')#将降维后的数据输出到csv，所用数据去除了降维数据的行标和列标。

# #----------------------------------降维3.MDS多维缩放降维--------------------------------------------#
# X = np.loadtxt(path+"Brats_test1.csv", dtype=int, delimiter=",") # 导入数据
# # MDS降维到二维
# mds = MDS(n_components=2, dissimilarity='euclidean', random_state=0) # 降维到2维，数据点的距离为欧几里得距离
# X_mds = mds.fit_transform(X)
# mds_result = mds.fit_transform(X_mds)  # NOTE(review): re-fitting MDS on the already-embedded X_mds looks redundant — presumably `mds_result = X_mds` was intended; confirm before re-enabling
# mds1 = pd.DataFrame(mds_result)
# mds1.to_csv(path+'MDS_for_km.csv') # 将降维后的数据输出到csv，所用数据去除了降维数据的行标和列标。

#----------------------------------降维4.SVD奇异值分解降维--------------------------------------------#
# X = np.loadtxt(path+"Brats_test1.csv", dtype=int, delimiter=",") # 导入数据
# # 二维降维
# # 先构造矩阵，执行SVD降维
# U, s, VT = svd(X.data)
# # 选择前k个奇异值来近似原始矩阵
# k = 2
# svd_result = np.dot(U[:, :k], np.dot(np.diag(s[:k]), VT[:k, :])) # 重构降维后的数据
# svd1 = pd.DataFrame(svd_result)
# svd1.to_csv(path+'SVD_for_km.csv') # 将降维后的数据输出到csv，所用数据去除了降维数据的行标和列标。


# ----------------------------K-Means聚类-------------------------------------------#
# data = pd.read_csv(path+'SVD_for_km.csv') # 导入降维后的数据集
# km = KMeans(n_clusters=9).fit(data) #K-Means方法聚类为九类
# # #将聚类后的标签作为新的一列特征加入到原始数据集中
# data['cluster'] = km.labels_
# colors = np.array(['red','green','blue','black','yellow','cyan','magenta','violet', 'orange']) # 创建与聚类数量相等的颜色数组
# plt.scatter(data['0'],data['1'],c=colors[data['cluster']]) # 对前两列数据进行可视化
# plt.show() # 绘图

# ----------------------------------基于numpy数据画t-SNE聚类散点图------------------------------------#
# Y = np.loadtxt(path + "y.csv", dtype=float, delimiter=",")
# (n, m) = Y.shape
# fig, ax = plt.subplots()
# colors = np.array(['red','green','blue','black','yellow','cyan','magenta','violet', 'orange'])
# ax.scatter(Y[0:47, 0], Y[0:47, 1],c=colors[0])
# ax.scatter(Y[48:88, 0], Y[48:88, 1],c=colors[1])
# ax.scatter(Y[89:145, 0], Y[89:145, 1],c=colors[2])
# ax.scatter(Y[146:172, 0], Y[146:172, 1],c=colors[3])
# ax.scatter(Y[173:228, 0], Y[173:228, 1],c=colors[4])
# ax.scatter(Y[229:281, 0], Y[229:281, 1],c=colors[5])
# ax.scatter(Y[282:330, 0], Y[282:330, 1],c=colors[6])
# ax.scatter(Y[331:410, 0], Y[331:410, 1],c=colors[7])
# ax.scatter(Y[411:443, 0], Y[411:443, 1],c=colors[8])
# plt.show()


# #----------------------------------画t-SNE聚类带图标散点图------------------------------------#
# def get_image(path):
#     return OffsetImage(plt.imread(path)) #创建图片元素
#
# Y = np.loadtxt(path + "y.csv", dtype=float, delimiter=",")
# (n, m) = Y.shape
# fig, ax = plt.subplots()
#
# ax.scatter(Y[:, 0], Y[:, 1])
# plt.set_cmap(cm.gray)  # 修改颜色映射,注释掉看看
#
# for i in range(0, n):
#     ab = AnnotationBbox(get_image(small_path + "im_"+str(i) + ".png"), (Y[i, 0], Y[i, 1]), frameon=True)
#     ax.add_artist(ab)
# ax = plt.gca()
# ax.set_aspect(1)
# plt.show()
# # fig.savefig("mnist_scatter.png")

# -*- coding: utf-8 -*-  (NOTE: a coding cookie is only effective on line 1 or 2 of a file; here mid-file it is inert)

# #---------------------------------层次聚类------------------------------------#
# data = pd.read_csv(path+'SVD_for_km.csv') # 导入数据
# model = AgglomerativeClustering(n_clusters=9).fit(data)
# # # 获取聚类标签，并添加到原数据集
# data['cluster'] = model.labels_
# colors = np.array(['red','green','blue','black','yellow','cyan','magenta','violet', 'orange'])
# plt.scatter(data['0'],data['1'],c=colors[data['cluster']])
# plt.show()


# #----------------------------------BIRCH聚类------------------------------------#
# data = pd.read_csv(path+'SVD_for_km.csv') # 导入数据
# # # BIRCH聚类
# brc = Birch(n_clusters=9, threshold=0.5)
# brc.fit_transform(data)
# # # 获取聚类标签，并添加到原数据集
# data['cluster'] = brc.labels_
# colors = np.array(['red','green','blue','black','yellow','cyan','magenta','violet', 'orange'])
# plt.scatter(data['0'],data['1'],c=colors[data['cluster']])
# plt.show()



# # #----------------------------------DBSCAN聚类------------------------------------#
# X = pd.read_csv(path + 'SVD_for_km.csv')  # 导入数据
# # 数据标准化，以便更好地进行聚类计算
# scaler = StandardScaler()
# X = scaler.fit_transform(X)  # 对数据进行拟合和转换，使数据均值为0，标准差为1
# # 设定DBSCAN模型
# db = DBSCAN(eps=0.3, min_samples=5)  # 设置邻域半径eps为0.3，最小邻点数min_samples为5
# # 执行DBSCAN聚类
# db.fit(X)
# labels = db.labels_  # 获取聚类标签，每个数据点的标签代表其所属的簇或噪声点（-1）
# # 绘制聚类结果
# unique_labels = set(labels)  # 获取所有唯一的聚类标签（包括噪声点）
# colors = [plt.cm.Spectral(each)  # 使用Spectral颜色映射为每个标签生成颜色
#           for each in np.linspace(0, 1, len(unique_labels))]  # 根据唯一标签的数量在颜色映射上均匀取点
# for k, col in zip(unique_labels, colors):  # 遍历每个唯一的标签和对应的颜色
#     if k == -1:
#         # 噪声点使用黑色表示
#         col = [0, 0, 0, 1]  # 黑色，这里使用了RGBA颜色格式，最后一个值为透明度
#     class_member_mask = (labels == k)  # 生成一个布尔掩码，用于筛选出标签为k的数据点
#     xy = X[class_member_mask]  # 使用掩码从数据集中筛选出标签为k的数据点
#     plt.plot(xy[:, 0], xy[:, 1], 'o',  # 在图上绘制数据点，使用圆形标记
#              markerfacecolor=tuple(col),  # 设置标记的填充颜色为之前计算的颜色
#              markeredgecolor='k',  # 设置标记的边缘颜色为黑色
#              markersize=6)  # 设置标记的大小
# plt.title('DBSCAN Clustering')
# plt.show()  # 显示图形