# It is defined by the kaggle/python docker image: https://github.com/kaggle/docker-python
# For example, here's several helpful packages to load in 

import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import matplotlib.pyplot as plt
import matplotlib.cm as cm
# import seaborn as sns
from wordcloud import WordCloud
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.cluster import KMeans
from sklearn.cluster import MiniBatchKMeans
from sklearn.decomposition import PCA
from sklearn.manifold import TSNE

# Load the pre-cleaned corpus; the first CSV column serves as the index.
df = pd.read_csv('clean.csv', index_col=0)
print('原始数据行数： ', df.shape)
# df.fillna('', inplace=True)  # keeps the row count; blank rows remain after cleaning
# df.dropna(inplace=True)  # changes the row count; removes blank rows
df.head()  # notebook residue: no effect when run as a plain script

# Document-term matrix: unigrams + bigrams, sublinear tf scaling with idf,
# capped at the 20k most frequent terms.
vectorizer = TfidfVectorizer(
    stop_words='english',
    ngram_range=(1, 2),
    sublinear_tf=True,
    use_idf=True,
    # max_df=0.8,    # drop very frequent filler words
    # min_df=0.001,  # drop rare words / likely misspellings
    max_features=20000,
)
X = vectorizer.fit_transform(df['text'])
print('tfidf shape: ', X.shape)  # (140101, 1458)
vectorizer.idf_  # notebook residue: no effect when run as a plain script


# Candidate cluster counts for the elbow search: 2, 4, ..., 20.
num_clusters = range(2, 22, 2)

# Sum of squared distances (inertia) recorded for each candidate k.
sum_square_error = []

# Fit one MiniBatchKMeans per candidate k and keep its inertia.
for k in num_clusters:
    model = MiniBatchKMeans(n_clusters=k, init_size=1024, batch_size=2048,
                            random_state=42)
    sum_square_error.append(model.fit(X).inertia_)
    print('now fitting {} clusters using  Mini batch K-means algorithm'.format(k))

# Plot SSE vs. k so the elbow can be read off visually.
plt.figure(figsize=(12, 5))
plt.plot(num_clusters, sum_square_error, "g^-")
plt.xticks(num_clusters)
plt.xlabel('Number of Clusters')
plt.ylabel("Sum of Square Distance")
plt.title('Elbow Method')
plt.show()

# Final model: full-batch KMeans with 8 clusters on the tf-idf matrix.
cluster_predictions = KMeans(n_clusters=8, random_state=42).fit_predict(X)
# cluster_predictions = MiniBatchKMeans(n_clusters=16, init_size=1024, batch_size=2048, random_state=42).fit_predict(X)


def get_top_keywords(data, clusters, labels, n_terms):
    '''
    Print the top n_terms keywords (by mean tf-idf score) for each cluster.

    Parameters
    ----------
    data : scipy sparse matrix, shape (n_docs, n_features)
        Tf-idf document-term matrix.
    clusters : array-like of int, shape (n_docs,)
        Cluster label assigned to each document (one entry per row of data).
    labels : sequence of str
        Feature names indexed by column position, e.g. from
        vectorizer.get_feature_names_out().
    n_terms : int
        Number of top-scoring terms to print per cluster.
    '''
    # .toarray() instead of the deprecated .todense(): todense() returns the
    # legacy np.matrix type.  Renamed from `df` to avoid shadowing the
    # module-level DataFrame of the same name.
    means = pd.DataFrame(data.toarray()).groupby(clusters).mean()

    # For each cluster, print the n_terms columns with the highest mean score
    # (ascending order, strongest term printed last).
    for i, row in means.iterrows():
        print('\nCluster {}'.format(i + 1))
        top = np.argsort(row.to_numpy())[-n_terms:]
        print(','.join([labels[t] for t in top]))


# Show the top 10 keywords per cluster.  get_feature_names() was removed in
# scikit-learn 1.2; get_feature_names_out() (available since 1.0) returns the
# same column-ordered feature names.
get_top_keywords(X, cluster_predictions, vectorizer.get_feature_names_out(), 10)