import pandas as pd
import numpy as np
from sklearn.utils import shuffle
from sklearn.cluster import KMeans
from sklearn.cluster import AgglomerativeClustering  #层次聚类
import matplotlib.pyplot as plt
from sklearn.cluster import MeanShift
from sklearn.cluster import DBSCAN
from sklearn.decomposition import PCA
from sklearn.preprocessing import MinMaxScaler

def draw(dataSet, labels, titles):
    """Scatter-plot 2-D points colored by their cluster label.

    Args:
        dataSet: iterable of 2-element points (x, y) — e.g. the PCA-reduced rows.
        labels: per-point cluster labels, same length as dataSet.
        titles: figure title (passed to plt.suptitle).
    """
    plt.figure()
    plt.rcParams['font.sans-serif'] = ['SimHei']  # render CJK characters in titles/labels
    plt.suptitle(titles)
    # Group point coordinates by label; dict preserves first-seen insertion
    # order, so plotting order (and thus color assignment) matches the data.
    grouped = {}
    for point, label in zip(dataSet, labels):
        xs, ys = grouped.setdefault(label, ([], []))
        xs.append(point[0])
        ys.append(point[1])
    for label, (xs, ys) in grouped.items():
        # Bug fix: pass label= so plt.legend() actually has entries to show
        # (previously the legend was empty and matplotlib warned).
        plt.scatter(xs, ys, label=str(label))
    plt.legend(loc="upper right")  # show cluster labels in the legend
    plt.xlabel("数据编号")
    plt.ylabel('结果分类')
    plt.show()

def clipData(Data, row, lables):
    """Keep only the first ``row`` rows whose 'activity' value is in ``lables``.

    Args:
        Data: DataFrame with (at least) an 'activity' column; assumed to have a
              default RangeIndex, as produced by ``pd.read_csv`` in loadData.
        row: number of leading rows to consider.
        lables: collection of activity names to keep (original spelling kept
                for interface compatibility).

    Returns:
        A new DataFrame with a fresh 0..n-1 index and the canonical column
        names. Unlike the original row-by-row rebuild, this keeps the
        original dtypes (the old np.array(...).tolist() round-trip coerced
        every column to strings).
    """
    subset = Data.iloc[:row]
    # Vectorized filter instead of a Python-level loop over .loc lookups.
    data = subset[subset['activity'].isin(lables)].reset_index(drop=True)
    data.columns = ['SequenceName', 'Tagidentificator', 'timestamp', 'dateFORMAT', 'x_coordinate', 'y_coordinate','z_coordinate', 'activity']
    return data

#标准化
def myTransform(myData):
    minMax = MinMaxScaler()
    myData = minMax.fit_transform(myData)
    return myData

# Dimensionality reduction
def dimensionality_reduction(Data):
    """Project ``Data`` onto its first two principal components via PCA."""
    reducer = PCA(n_components=2)
    return reducer.fit_transform(Data)

def loadData(fileName):
    """Load the sensor CSV, filter to sedentary activities, reduce and scale.

    Pipeline: read CSV -> drop NaN rows -> keep the three target activities
    -> shuffle -> take the x/y/z coordinate columns -> PCA to 2-D -> min-max
    scale. Returns the resulting 2-D ndarray ready for clustering.

    Args:
        fileName: path to the CSV file (no header; 8 fixed columns).

    Returns:
        ndarray of shape (n_samples, 2), values scaled into [0, 1].
    """
    Data = pd.read_csv(fileName, names=['SequenceName', 'Tagidentificator', 'timestamp', 'dateFORMAT', 'x_coordinate',
                                        'y_coordinate', 'z_coordinate', 'activity'])
    # Bug fix: dropna returns a new frame — the result was previously
    # discarded, so NaN rows were never actually removed.
    Data = Data.dropna(axis=0).reset_index(drop=True)
    row, columns = Data.shape
    Data = clipData(Data, row, ['sitting on the ground', 'sitting', 'lying down'])
    Data = shuffle(Data)  # NOTE: unseeded — ordering differs between runs
    dataSet = Data.iloc[:, 4:7]  # x/y/z coordinate columns
    dataSet = dimensionality_reduction(dataSet)
    dataSet = myTransform(dataSet)
    return dataSet


# K-means clustering
def myKMeans(dataSet, n_clusters):
    """Partition ``dataSet`` into ``n_clusters`` groups with k-means and plot."""
    assignments = KMeans(n_clusters).fit_predict(dataSet)
    draw(dataSet, assignments, titles="KMeans算法聚类效果图")
# Hierarchical clustering
def myAgglomerativeClustering(dataSet):
    """Run complete-linkage agglomerative clustering (2 clusters) and plot."""
    model = AgglomerativeClustering(n_clusters=2, affinity='euclidean', linkage='complete')
    assignments = model.fit_predict(dataSet)
    draw(dataSet, assignments, titles="层次算法聚类效果图")

def myMeanShift(dataSet):
    """Cluster ``dataSet`` with mean-shift (bandwidth 0.1) and plot the result."""
    model = MeanShift(bandwidth=0.1)  # kernel bandwidth
    model.fit(dataSet)
    draw(dataSet, model.labels_, titles='MeanShift算法聚类效果图')


def myDBSCAN(dataSet):
    """Density-based clustering of ``dataSet`` with DBSCAN; plots the labels
    (noise points get label -1)."""
    model = DBSCAN(eps=0.3, min_samples=10, metric='euclidean')
    model.fit(dataSet)
    draw(dataSet, model.labels_, titles='DBSCAN算法聚类效果图')


if __name__ == "__main__":
    # Entry point: load and preprocess the data, then run two clustering demos.
    dataSet = loadData('ConfLongDemo_JSI.csv')
    myKMeans(dataSet, 4)
    # Alternative algorithms, disabled by default — uncomment to compare:
    #myAgglomerativeClustering(dataSet)
    #myMeanShift(dataSet)
    myDBSCAN(dataSet)