# 代码4-7
import re
import os
import json
import jieba
import pandas as pd
from sklearn.cluster import KMeans
import joblib
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.feature_extraction.text import CountVectorizer
import random
from sklearn import metrics

# Load the raw news data (the CSV has no header row).
data = pd.read_csv('./data/new.csv', encoding='utf-8', header=None)
data.columns = ['news', 'label']

# Drop exact duplicate rows, then strip every literal 'x' character from the
# news text (de-sensitisation artefacts) on an independent copy.
data_dup = data.drop_duplicates()
data_qumin = data_dup.copy()
data_qumin['news'] = data_dup['news'].astype(str).str.replace('x', '', regex=True)

# Listing 4-8
# Data preprocessing: stopword loading, user-dictionary loading, tokenisation
# and token cleanup.
stopword = pd.read_csv('./data/stopword.txt', sep='ooo', encoding='gbk', header=None, engine='python')
# A set gives O(1) membership tests in the stopword filter below; a single
# space is treated as a stopword as well.
stopword = set([' '] + list(stopword[0]))

# FIX: jieba.load_userdict() expects a file path (or a file-like object);
# the original read the file into a DataFrame with pandas first and passed
# that DataFrame, which is not a valid argument for the API.
jieba.load_userdict('./data/newdic1.txt')

data_cut = data_qumin.copy()
# Tokenise every news text with jieba.
data_cut['news'] = data_qumin['news'].astype('str').apply(lambda x: list(jieba.cut(x)))
# Remove stopwords.
data_cut['news'] = data_cut['news'].apply(lambda x: [i for i in x if i not in stopword])

# Keep only CJK (Chinese) characters inside each token, then drop tokens
# shorter than two characters after the cleanup.
data_cut['news'] = data_cut['news'].apply(lambda x: [''.join(re.findall(r'[\u4e00-\u9fa5]', i)) for i in x])
data_cut['news'] = data_cut['news'].apply(lambda x: [i for i in x if len(i) > 1])

# Keep only documents that still have at least two tokens after cleaning.
ind = [len(tokens) > 1 for tokens in data_cut.iloc[:, 0]]
# FIX: the original pre-assigned an empty DataFrame that was immediately
# overwritten (dead code), and took a .loc slice without .copy(), which makes
# the label assignment below raise SettingWithCopyWarning.
data_qustop = data_cut.loc[ind, :].copy()

# Train/test preparation: encode category names as numeric string labels.
# Categories not present in the mapping become None.
reps = {'教育': '1', '体育': '2', '健康': '3', '旅游': '4'}
data_qustop['label'] = data_qustop['label'].map(reps.get)

# One whitespace-joined string of tokens per document, as expected by
# CountVectorizer further down.
corpus = [' '.join(tokens) for tokens in data_qustop.iloc[:, 0]]
print(len(corpus))
# The first 1600 documents form the training split, the rest the test split.
train_corpus = corpus[:1600]
test_corpus = corpus[1600:]
# print(test_corpus)
# Listing 4-9
# Convert the texts into a term-frequency matrix; element a[i][j] is the
# frequency of term j in document i.
vectorizer = CountVectorizer()
# Compute the tf-idf weight of every term.
transformer = TfidfTransformer()
# Fit the vocabulary and the idf statistics on the training corpus only.
train_tfidf = transformer.fit_transform(vectorizer.fit_transform(train_corpus))
# FIX: the test corpus must be projected into the SAME feature space as the
# training corpus, so use transform() here; the original fit_transform()
# built a separate vocabulary and idf from the test data, producing a matrix
# that is incompatible with the model trained on train_weight.
test_tfidf = transformer.transform(vectorizer.transform(test_corpus))
# Dense tf-idf matrices: w[i][j] is the tf-idf weight of term j in document i.
train_weight = train_tfidf.toarray()
test_weight = test_tfidf.toarray()
print(train_weight.shape)
# Listing 4-10
# K-Means clustering of the training tf-idf matrix.
clf = KMeans(n_clusters=4, algorithm='elkan')  # 4 cluster centres, Elkan algorithm variant
# clf.fit(X) fits the estimator on the data
clf.fit(train_weight)
# Print the 4 learned cluster centres
print('4个中心点为:' + str(clf.cluster_centers_))
# Persist the fitted model to disk
joblib.dump(clf, 'km.pkl')
# Cluster-size distribution of the training documents
train_res = pd.Series(clf.labels_).value_counts()
# Predicted cluster of each training document
labels_pred = clf.labels_
# Ground-truth category labels ('1'..'4' as mapped earlier) of the first 1600 documents
labels_true = data_qustop['label'][:1600]

# External clustering metrics: ARI, AMI and V-measure against the true labels
print('\n训练集ARI为：' + str(metrics.adjusted_rand_score(labels_true, labels_pred)))
print('\n训练集AMI为：' + str(metrics.adjusted_mutual_info_score(labels_true, labels_pred)))
print('\n训练集调和平均为：' + str(metrics.v_measure_score(labels_true, labels_pred)))
# print('每个样本所属的簇为')
# for i in range(len(clf.labels_)):
#     print(i, ' ', clf.labels_[i])

# Listing 4-11
# Predicted clusters for the test documents.
# NOTE(review): fit_predict() RE-FITS KMeans from scratch on the test matrix
# rather than assigning the test documents to the centres learned on the
# training data with predict() — confirm this is the intended evaluation.
labels_pred = clf.fit_predict(test_weight)
# Ground-truth labels of the held-out documents.
labels_true = data_qustop['label'][1600:]
print('\n测试集ARI为：' + str(metrics.adjusted_rand_score(labels_true, labels_pred)))
print('\n测试集AMI为：' + str(metrics.adjusted_mutual_info_score(labels_true, labels_pred)))
print('\n测试集调和平均为：' + str(metrics.v_measure_score(labels_true, labels_pred)))


def PCA(weight, dimension):
    """Project `weight` onto its first `dimension` principal components.

    Prints the dimensionality before and after the reduction and returns
    the reduced matrix.
    """
    # Alias the import so it does not shadow this function's own name.
    from sklearn.decomposition import PCA as _SkPCA
    print('原有维度: ', len(weight[0]))
    print('开始降维:')
    reducer = _SkPCA(n_components=dimension)
    reduced = reducer.fit_transform(weight)
    print('现在维度: ', len(reduced[0]))
    return reduced
# Reduce the training tf-idf matrix to 1500 dimensions.
# NOTE(review): `tttt` is a poor variable name, but the Birch section below
# references it, so it is kept as-is.
tttt = PCA(train_weight, 1500)
print(tttt.shape)
# K-Means clustering on the reduced features.
clf1 = KMeans(n_clusters=4, init='k-means++')  # 4 cluster centres, k-means++ initialisation
# clf.fit(X) fits the estimator on the data
clf1.fit(tttt)
labels_pred = clf1.labels_                 # predicted cluster of each document
labels_true = data_qustop['label'][:1600]  # ground-truth labels of the training documents
print('\n训练集ARI为1：' + str(metrics.adjusted_rand_score(labels_true, labels_pred)))
print('\n训练集AMI为1：' + str(metrics.adjusted_mutual_info_score(labels_true, labels_pred)))
print('\n训练集调和平均为1：' + str(metrics.v_measure_score(labels_true, labels_pred)))

from sklearn.cluster import Birch
# Birch clustering (4 clusters) on the PCA-reduced features.
clusterer = Birch(n_clusters=4, threshold=0.5)
# FIX: the original called clusterer.fit(tttt) and then fit_predict(tttt),
# fitting the model twice on the same data; a single fit_predict() call
# fits once and returns the same labels.
labels_pred = clusterer.fit_predict(tttt)

# NOTE(review): this whole section appears to be a copy-paste of the training
# evaluation in Listing 4-10.  It re-prints the KMeans centres and re-saves
# 'km.pkl' — but `clf` was last re-fitted on the TEST matrix (fit_predict in
# Listing 4-11), so this is not the training-set model.  Furthermore,
# `clf.fit_predict(train_weight)` below re-fits KMeans and overwrites the
# Birch `labels_pred` computed just above without ever scoring it — confirm
# whether the Birch labels were meant to be evaluated here.
# The 4 cluster centres of `clf`
print('4个中心点为:' + str(clf.cluster_centers_))
# Save the model again (overwrites the earlier km.pkl)
joblib.dump(clf, 'km.pkl')
train_res = pd.Series(clf.labels_).value_counts()
# Predicted clusters (re-fits clf on the training matrix)

labels_pred = clf.fit_predict(train_weight)
# Ground-truth labels
labels_true = data_qustop['label'][:1600]

print('\n训练集ARI为：' + str(metrics.adjusted_rand_score(labels_true, labels_pred)))
print('\n训练集AMI为：' + str(metrics.adjusted_mutual_info_score(labels_true, labels_pred)))
print('\n训练集调和平均为：' + str(metrics.v_measure_score(labels_true, labels_pred)))
# print('每个样本所属的簇为')
# for i in range(len(clf.labels_)):
#     print(i, ' ', clf.labels_[i])
