import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import warnings
warnings.filterwarnings("ignore")
plt.rcParams['font.sans-serif']=['SimHei']
plt.rcParams['axes.unicode_minus'] = False
import seaborn as sns
import jieba
from sklearn.preprocessing import normalize
from sklearn.cluster import MiniBatchKMeans
from sklearn import metrics

sns.set()

# Load the labelled corpus: one row per document, columns = (category, content).
data = pd.read_csv("./data/training.csv", names=['category', 'content'],
                   encoding='utf-8', header=None)
print(data.head())
print(data.shape)

# jieba's tokenizer expects plain Python strings, so pull the column out as a list.
contents = data.content.values.tolist()

content_s = []
for line in contents:
    current_segment = jieba.lcut(line)
    # Keep only documents that segment into more than one token. (The original
    # extra check `current_segment != "\n"` compared a list to a string and was
    # always True; the length test already drops newline-only lines.)
    if len(current_segment) > 1:
        content_s.append(current_segment)

# content_s is a ragged list of token lists; np.array() on ragged input raises
# in modern NumPy, so report the document count directly.
print(len(content_s))
df_content = pd.DataFrame({'content_s': content_s})
print(df_content.head())

# Load the stopword lexicon (one word per line); quoting=3 disables quote parsing.
stopwords = pd.read_csv('./data/stopwords.txt', index_col=False, sep='\t',
                        quoting=3, names=['stopwords'], encoding='UTF-8')
print(stopwords.head())

# 去掉停用词
# 自定义一个函数
def drop_stopwords(contents, stopwords):
    """Remove stopwords from tokenized documents.

    Parameters
    ----------
    contents : list[list[str]]
        One token list per document (jieba segmentation output).
    stopwords : iterable of str
        Words to discard.

    Returns
    -------
    tuple[list[list[str]], list[str]]
        The cleaned documents (same order, stopwords removed) and a flat
        list of every kept token, coerced to ``str``.
    """
    # Build the set once: O(1) membership tests instead of an O(n) list
    # scan per token (the original was quadratic over the corpus).
    stopword_set = set(stopwords)
    content_clean = []
    all_words = []
    for line in contents:
        line_clean = [word for word in line if word not in stopword_set]
        content_clean.append(line_clean)
        all_words.extend(str(word) for word in line_clean)
    return content_clean, all_words

# Strip stopwords from every segmented document.
segmented_docs = df_content.content_s.values.tolist()
stopword_list = stopwords.stopwords.values.tolist()
content_clean, all_words = drop_stopwords(segmented_docs, stopword_list)

# Collect the cleaned token lists into a DataFrame for the next stage.
df_content1 = pd.DataFrame({'contents_clean': content_clean})
df_content1.head()

# Feature extraction: build the TF-IDF matrix.
# TfidfVectorizer expects each document as one whitespace-joined string.
X_train = df_content1.contents_clean
words = []
for line_index, tokens in enumerate(X_train):
    try:
        words.append(' '.join(tokens))
    except TypeError:
        # A non-iterable row (e.g. NaN) — report its index and skip it.
        # (A bare `except:` here would also hide KeyboardInterrupt etc.)
        print(line_index)
print(words[0])

# Build the TF-IDF features (top-20 terms only, case preserved).
from sklearn.feature_extraction.text import TfidfVectorizer
vectorizer = TfidfVectorizer(analyzer='word', max_features=20, lowercase=False)
X = vectorizer.fit_transform(words).toarray()
print(X.shape)
# get_feature_names() was removed in scikit-learn 1.2;
# get_feature_names_out() is the supported replacement.
new_train = pd.DataFrame(columns=vectorizer.get_feature_names_out(), data=X)
print(new_train.head())

# K-means compares samples by distance, so L2-normalize each row (unit norm).
# Assign the result explicitly: normalize() returns a new array, and relying on
# copy=False to mutate a DataFrame's buffer in place is not guaranteed.
x_train = pd.DataFrame(normalize(new_train.values, norm="l2"),
                       columns=new_train.columns)


# KMeans聚类
# 一个参数点（聚类数据为K）的模型
def K_cluster_analysis(K, X):
    """Fit MiniBatchKMeans with ``K`` clusters on ``X`` and score the result.

    Parameters
    ----------
    K : int
        Number of clusters to try.
    X : array-like of shape (n_samples, n_features)
        Feature matrix (here: row-normalized TF-IDF features).

    Returns
    -------
    float
        Calinski-Harabasz index of the clustering (higher is better).
    """
    print("K-means begin with clusters: {}".format(K))

    # Fit on the training data; fit_predict returns one cluster label per sample.
    mb_kmeans = MiniBatchKMeans(n_clusters=K)
    y_pred = mb_kmeans.fit_predict(X)

    # The training data is labelled, so a supervised index would also work:
    # v_score = metrics.v_measure_score(y_val, y_val_pred)

    # Unsupervised quality index (bigger = better-separated clusters).
    # Note: calinski_harabaz_score was renamed calinski_harabasz_score and the
    # misspelled alias was removed in scikit-learn 0.23.
    CH_score = metrics.calinski_harabasz_score(X, y_pred)

    # The silhouette coefficient is O(n^2) and too slow on large samples:
    # si_score = metrics.silhouette_score(X, y_pred)

    print("CH_score: {}".format(CH_score))

    return CH_score  # ,si_score

# Hyperparameter sweep: candidate cluster counts K.
Ks = [5, 10, 15, 20, 25, 30, 35, 40, 45, 50]
CH_scores = []
for K in Ks:
    CH_scores.append(K_cluster_analysis(K, x_train))

# Plot score vs K to see where clustering quality peaks.
plt.plot(Ks, np.array(CH_scores), 'b-', label='CH_scores')
plt.xlabel('K')
plt.ylabel('Calinski-Harabasz score')
plt.legend()  # without this call the label above is never shown

# Best K = the candidate with the highest CH score. np.argmax on a 1-D
# sequence already returns a flat index, so unravel_index is unnecessary.
Best_K = Ks[int(np.argmax(CH_scores))]

print(Best_K)
plt.show()