
# coding: utf-8

# # HW4

# In[46]:


import pickle as p

import numpy as np

import pandas as pd


# ## 提取数据

# In[47]:


# Load the preprocessed data (pickled by an earlier notebook step).
# FIX: the original used bare `p.load(open(...))`, leaking every file
# handle; each load now goes through a context manager.

def _load_pickle(path):
    """Unpickle and return the single object stored at *path*."""
    with open(path, 'rb') as fh:
        return p.load(fh)

# events attended by each user / users attending each event
eventsForUser = _load_pickle('PE_eventsForUser.pkl')
usersForEvent = _load_pickle('PE_usersForEvent.pkl')
# id -> index mappings for users and events
userIndex = _load_pickle('PE_userIndex.pkl')
eventIndex = _load_pickle('PE_eventIndex.pkl')
# related user pairs / related event pairs
# NOTE(review): this file uses an 'FE_' prefix while every sibling uses
# 'PE_' — confirm the filename is intentional and not a typo.
uniqueUserPairs = _load_pickle('FE_uniqueUserPairs.pkl')
uniqueEventPairs = _load_pickle('PE_uniqueEventPairs.pkl')


# In[48]:


#new_userIndex = {v : k for k, v in userIndex.items()}# 将user索引反转


# In[49]:


# Pull the rows of events.csv whose event_id appears in eventIndex,
# i.e. the events relevant to the train/test data.
# FIX: the file is now opened with a context manager so the handle is
# closed (the original left `fevents` open), and dict membership uses
# `in eventIndex` instead of the redundant `.keys()` call.
data = []
with open('events.csv', 'rb') as fevents:
    colums = fevents.readline().decode().strip().split(",")
    # NOTE(review): matching on event_id alone; additionally requiring the
    # creator user_id to match userIndex left only ~5 rows (per the
    # original author's experiment), so that stricter filter was dropped.
    for line in fevents:
        cols = line.decode().strip().split(",")
        if cols[0] in eventIndex:
            data.append(cols)


# In[50]:


# Assemble the filtered rows into the training frame.
train = pd.DataFrame(data, columns=colums)


# In[51]:


train.head()


# In[52]:


# Per the assignment, keep only the c_1 .. c_other count columns.
dr = ['event_id','user_id','start_time','city','state','zip','country','lat','lng']
train = train.drop(dr, axis=1)
train.info()


# In[53]:


# Replace any missing values with 0.
# BUG FIX: the original called `train.fillna(value=0)` without assigning
# the result — fillna returns a new frame, so NaNs were never replaced.
train = train.fillna(value=0)


# ## 聚类MiniBatch K-Means

# In[54]:


from sklearn.cluster import MiniBatchKMeans
from sklearn.model_selection import train_test_split
from sklearn import metrics

import time

import matplotlib.pyplot as plt
# Notebook magic: render matplotlib figures inline in the notebook output.
get_ipython().run_line_magic('matplotlib', 'inline')


# In[55]:


def K_cluster_analysis(K, X_train):
    """Cluster X_train into K groups with MiniBatchKMeans and return the
    silhouette score of the labelling (higher is better).

    NOTE: despite the historical "CH" label in the printed output, the
    metric computed here is the silhouette coefficient, not
    Calinski-Harabasz.
    """
    start = time.time()
    print("K-means begin with clusters: {}".format(K))

    model = MiniBatchKMeans(n_clusters=K)
    model.fit(X_train)
    labels = model.predict(X_train)

    score = metrics.silhouette_score(X_train, labels)
    print("CH_score: {}, time elaps:{}".format(score, int(time.time() - start)))
    return score


# In[60]:


# Sweep K over 10, 20, ..., 100 and record the silhouette score for each.
Ks = list(range(10, 110, 10))
CH_scores = [K_cluster_analysis(K, train) for K in Ks]


# In[61]:


# Score as a function of K.
plt.plot(Ks, np.array(CH_scores), 'b-')


# Based on the silhouette ("CH") scores, K = 10 appears to give the best clustering, so K = 10 is used below.

# In[74]:


# Final model: K = 10, the best K found in the sweep above.
n_clusters = 10
final_mbk = MiniBatchKMeans(n_clusters=n_clusters)
final_mbk.fit(train)

y_train_pred = final_mbk.labels_
cents = final_mbk.cluster_centers_  # centroids


# In[75]:


# Quick textual summary: centroid coordinates plus per-cluster counts.
counts = pd.Series(y_train_pred).value_counts()  # samples per cluster label
centers_df = pd.DataFrame(cents)                 # one row per centroid
r = pd.concat([centers_df, counts], axis=1)      # align on cluster-label index
r.columns = list(train.columns) + [u'类别数目']  # last column: cluster size
#print(r)


# In[77]:


# Reduce the training data to 2-D with t-SNE for plotting.
from sklearn.manifold import TSNE
tsne = TSNE()
tsne.fit_transform(train)  # fit; the 2-D coordinates land in .embedding_
tsne = pd.DataFrame(tsne.embedding_, index=train.index)  # to DataFrame


# In[90]:


# Attach the cluster label to each 2-D point.
# FIX: the original bound this frame to `p`, shadowing the `pickle as p`
# alias imported at the top of the file.
clusters_df = pd.concat([tsne, pd.Series(y_train_pred, index=tsne.index)], axis=1)
clusters_df.columns = ['x', 'y', 'clus']


# In[101]:


# Plot at most the first 100 points of each cluster.
colors = ['b','g','r','k','c','m','y','#e24fff','#524C90','#845868']

fig = plt.figure()
ax1 = fig.add_subplot(111)
# BUG FIX: the original looped `range(0, 9)` and silently dropped
# cluster 9 even though n_clusters = 10 (and `colors` has 10 entries).
for i in range(len(colors)):
    d = clusters_df[clusters_df.clus == i]
    d = d[0:100]
    ax1.scatter(d['x'], d['y'], c=colors[i])
plt.show()


# Judging from the plot, the clustering result looks reasonably good.

# ## 聚类 K-Means

# In[102]:


from sklearn.cluster import KMeans

def K_Means_analysis(K, X_train):
    """Cluster X_train into K groups with full-batch KMeans and return the
    silhouette score of the labelling (higher is better).

    NOTE: despite the historical "CH" label in the printed output, the
    metric computed here is the silhouette coefficient, not
    Calinski-Harabasz.
    """
    start = time.time()
    print("K-means begin with clusters: {}".format(K))

    model = KMeans(n_clusters=K)
    model.fit(X_train)
    labels = model.predict(X_train)

    score = metrics.silhouette_score(X_train, labels)
    print("CH_score: {}, time elaps:{}".format(score, int(time.time() - start)))
    return score


# In[111]:


# Same K sweep as above, this time with full-batch KMeans.
Ks_2 = list(range(10, 110, 10))
CH_scores_2 = [K_Means_analysis(K, train) for K in Ks_2]


# By these silhouette ("CH") scores, K-Means does better here than
# MiniBatchKMeans.

# In[112]:


plt.plot(Ks_2, np.array(CH_scores_2), 'b-')


# In[106]:


# Fit full-batch KMeans with the same K for comparison.
n_clusters = 10
kmeans_final = KMeans(n_clusters=n_clusters)
kmeans_final.fit(train)

y_train_pred_2 = kmeans_final.labels_
cents_2 = kmeans_final.cluster_centers_  # centroids

# Attach the KMeans labels to the same t-SNE 2-D coordinates.
p_2 = pd.concat([tsne, pd.Series(y_train_pred_2, index=tsne.index)], axis=1)
p_2.columns = ['x', 'y', 'clus']

colors = ['b','g','r','k','c','m','y','#e24fff','#524C90','#845868']

fig = plt.figure()
ax1 = fig.add_subplot(111)
# BUG FIX: the original looped `range(0, 9)` and silently dropped
# cluster 9 even though n_clusters = 10 (and `colors` has 10 entries).
for i in range(len(colors)):
    d = p_2[p_2.clus == i]
    d = d[0:100]  # at most the first 100 points per cluster
    ax1.scatter(d['x'], d['y'], c=colors[i])
plt.show()


# The scatter plot is broadly similar, though arguably less clean than the MiniBatch version; a quick experiment suggests K ≈ 5 clusters even better.
