# -*- coding:utf-8 -*-

# @Time    : 2018/10/31 4:52 PM

# @Author  : Swing


import pandas as pd
import numpy as np

from sklearn.model_selection import train_test_split

from sklearn.cluster import MiniBatchKMeans
from sklearn.decomposition import PCA

from sklearn.metrics import calinski_harabaz_score, silhouette_score
import matplotlib.pyplot as plt

from sklearn.preprocessing import StandardScaler, MinMaxScaler


def load_data(path='usefulEvents.csv'):
    """Load the events dataset and print a column/dtype summary.

    Parameters
    ----------
    path : str or file-like, optional
        CSV source to read. Defaults to the file the script originally
        hard-coded, so existing callers are unaffected.

    Returns
    -------
    pandas.DataFrame
        The raw events table as read from *path*.
    """
    _data = pd.read_csv(path)

    # DataFrame.info() prints the summary itself and returns None;
    # the extra print just emits "None" after it (kept from original).
    print(_data.info())
    return _data


# Load the events data and drop identifier/location columns that carry no
# clustering signal (IDs, timestamps, geo fields).
data = load_data()

data = data.drop(
    ['event_id', 'user_id', 'start_time', 'city', 'state', 'zip',
     'country', 'lat', 'lng'],
    axis=1,
)

# Standardization (disabled experiment)
# ss = StandardScaler()
# data = ss.fit_transform(data)

# Min-max normalization (disabled experiment)
# sm = MinMaxScaler()
# data = sm.fit_transform(data)

# Dimensionality reduction (disabled experiment)
# pca = PCA(n_components=0.75, random_state=10)
# data = pca.fit_transform(data)
#
# print('PCA: ', data.shape)

# Candidate cluster counts to evaluate.
# n_clusters = [10, 20, 30, 40, 50, 60, 70, 80, 90, 100]
n_clusters = [2, 3, 4, 5, 6, 7, 8, 9, 10]

ch_scores = []  # Calinski-Harabasz score per candidate cluster count
sh_scores = []  # silhouette score per candidate cluster count

for n in n_clusters:
    mbk = MiniBatchKMeans(n_clusters=n, n_init=10, random_state=10)
    mbk.fit(data)

    # fit() already assigns every training sample a cluster in labels_;
    # the original called predict(data) twice, redundantly re-scoring
    # the whole dataset for each metric.
    labels = mbk.labels_

    # NOTE(review): calinski_harabaz_score is the pre-0.23 sklearn
    # spelling; newer versions rename it calinski_harabasz_score.
    ch_score = calinski_harabaz_score(data, labels)
    sh_score = silhouette_score(data, labels)

    ch_scores.append(ch_score)
    sh_scores.append(sh_score)

    print('n_Cluster: ', n, ' CH score: ', ch_score, ' SH score: ', sh_score)


def _plot_scores(x, y, ylabel, title):
    """Plot one score curve against the candidate cluster counts."""
    plt.figure()
    plt.plot(x, np.array(y), 'b-')
    plt.xlabel('n_Cluster')
    plt.ylabel(ylabel)
    plt.title(title)
    plt.show()
    plt.close()


# Original titles were placeholders ("1", "12"); use descriptive ones.
_plot_scores(n_clusters, ch_scores, 'ch_score',
             'Calinski-Harabasz score vs. number of clusters')
_plot_scores(n_clusters, sh_scores, 'sh_score',
             'Silhouette score vs. number of clusters')