# Import the required packages
import pandas as pd
import numpy as np
from sklearn.cluster import MiniBatchKMeans
from sklearn.model_selection import train_test_split
from sklearn import metrics

from sklearn.decomposition import PCA
import time

import matplotlib.pyplot as plt

# Count the distinct users and events that appear in the train/test sets.
uniqueUsers = set()
uniqueEvents = set()

for filename in ["train.csv", "test.csv"]:
    # Context manager guarantees the file is closed even if a line fails to parse.
    with open(filename, 'r') as f:
        f.readline()  # skip the header row (column names)
        for line in f:  # one record per line
            cols = line.strip().split(",")
            uniqueUsers.add(cols[0])   # first column is the user ID
            uniqueEvents.add(cols[1])  # second column is the event ID
n_uniqueUsers = len(uniqueUsers)
n_uniqueEvents = len(uniqueEvents)
print("number of uniqueUsers :%d" % n_uniqueUsers)
print("number of uniqueEvents :%d" % n_uniqueEvents)

# Filter events.csv down to only the events referenced by train/test,
# writing them (plus the header) to csvtest.csv.
# BUG FIX: the original called f_events.readline() inside the loop, which
# wrote the line AFTER each matching record (and skipped it in the
# iteration, desynchronizing the scan). Write the current line instead.
with open("events.csv", 'r') as f_events, open('csvtest.csv', 'w') as f_new_events:
    f_new_events.write(f_events.readline())  # copy the header row through
    for line in f_events:  # one event record per line
        cols = line.strip().split(",")
        if cols[0] in uniqueEvents:  # first column is the event ID
            f_new_events.write(line)

# Load the filtered event data and keep only the numeric feature columns.
X_train = pd.read_csv('csvtest.csv')
print(X_train.columns)
print(X_train.shape)
# Identifier and location columns carry no numeric signal for clustering.
id_and_location_cols = ['event_id', 'user_id', 'start_time', 'city',
                        'state', 'zip', 'country', 'lat', 'lng']
X_train = X_train.drop(columns=id_and_location_cols)
print(X_train.columns)
print(X_train.shape)
# Hold out 20% of the rows as a validation set for choosing model
# hyper-parameters (e.g. the number of PCA dimensions / clusters).
X_train_part, X_val = train_test_split(X_train, train_size=0.8, random_state=0)

from sklearn.preprocessing import MinMaxScaler

# Fit the [0, 1] min-max scaler on the training part only, then apply the
# same transform to the validation part (avoids information leakage).
ss_X = MinMaxScaler()
X_value_train = ss_X.fit_transform(X_train_part)
X_value_test = ss_X.transform(X_val)
# Row/column counts after the split.
print(X_train_part.shape)
print(X_val.shape)


# 一个参数点（聚类数据为K）的模型，在校验集上评价聚类算法性能
def K_cluster_analysis(K, X_train, X_val):
    """Fit MiniBatchKMeans with K clusters and score the clustering.

    Parameters
    ----------
    K : int
        Number of clusters to fit.
    X_train : array-like of shape (n_samples, n_features)
        Data the model is fit on and scored against.
    X_val : array-like
        Validation data; currently unused, kept for interface
        compatibility (a validation metric could be computed here).

    Returns
    -------
    float
        Calinski-Harabasz index on the training data (higher is better).
    """
    start = time.time()
    print("K-means begin with clusters: {}".format(K))

    # Fit mini-batch K-means; fit_predict fits once and returns the
    # training labels, avoiding a redundant second predict() pass.
    mb_kmeans = MiniBatchKMeans(n_clusters=K)
    labels = mb_kmeans.fit_predict(X_train)

    # Label-free evaluation of K: common criteria are the silhouette
    # coefficient and the Calinski-Harabasz index; for both, larger
    # values indicate better-separated clusters.
    # BUG FIX: sklearn's misspelled `calinski_harabaz_score` alias was
    # deprecated in 0.20 and removed in 0.23 -- use the correct name.
    CH_score = metrics.calinski_harabasz_score(X_train, labels)

    end = time.time()
    print("CH_score: {}, time elaps:{}".format(CH_score, int(end - start)))

    return CH_score
# Sweep the cluster count K over 10, 20, ..., 100 and plot the
# Calinski-Harabasz score for each setting.
Ks = list(range(10, 101, 10))
print(Ks)
CH_scores = [K_cluster_analysis(K, X_train_part, X_val) for K in Ks]
plt.plot(Ks, np.array(CH_scores), 'b-')
plt.show()