# # 代码4-2
# import os
# import re
# import jieba
# import numpy as np
# import pandas as pd
# # from scipy.misc import imread
# import imageio
# import matplotlib.pyplot as plt
# from wordcloud import WordCloud
# from sklearn.naive_bayes import MultinomialNB
# from sklearn.model_selection import train_test_split
# from sklearn.feature_extraction.text import CountVectorizer
# from sklearn.metrics import confusion_matrix,classification_report
#
#
# os.chdir('../data')
# # 读取数据
# data = pd.read_csv('new.csv',encoding='utf-8',header=None)
# data.columns = ['new','label']
# data.label.value_counts()
#
#
# # 代码4-3
# # 数据预处理
# temp = data.new
# temp.isnull().sum()
#
# # 去重
# data_dup = temp.drop_duplicates()
# # 脱敏
# l1 = data_dup.astype('str').apply(lambda x : len(x)).sum()
# data_qumin = data_dup.astype('str').apply(lambda x : re.sub('x','',x))  # 用空格代替x
# l2 = data_qumin.astype('str').apply(lambda x : len(x)).sum()
# print('减少了'+str(l1-l2)+'个字符')
# # 加载自定义词典
# current_dir = os.path.abspath('.')  # 获取当前目录的绝对路径
# print(current_dir)
# dict_file = os.path.join(current_dir,'newdic1.txt')
# jieba.load_userdict(dict_file)
# # 分词
# data_cut = data_qumin.astype('str').apply(lambda x : list(jieba.cut(x))) # data_cut里有空格冒号
# print(data_cut)
# # 去停用词
# stopword = pd.read_csv('stopword.txt',sep='ooo',encoding='gbk',header=None,engine='python')
# stopword = [' ']+list(stopword[0])  # 将第一列变成列表
# l3 = data_cut.astype('str').apply(lambda x : len(x)).sum()
# # 提取出在data_cut 不在stopword中的单词，实际就是提取英文单词，去掉杂乱的字符
# data_qustop = data_cut.apply(lambda x : [i for i in x if i not in stopword])
# l4 = data_qustop.astype('str').apply(lambda x : len(x)).sum()
# print('减少了'+str(l3-l4)+'个字符')
#
# data_qustop = data_qustop.loc[[i for i in data_qustop.index if data_qustop[i] != []]]  # 删除空列表的行
# data_qustop.drop(1999,axis=0,inplace=True)
#
# # 词频统计
# lab = [data.loc[i,'label'] for i in data_qustop.index]  # 取出对应的第一列
# lab1 = pd.Series(lab,index=data_qustop.index)  # 得到第一列的series
#
# def cipin(data_qustop, num=10):
#     temp = [' '.join(x) for x in data_qustop]  # 将每一行的单词用空格连起来
#     temp1 = ' '.join(temp)  # 将每一行的句子用空格连起来
#     temp2 = pd.Series(temp1.split()).value_counts()  # 将所有单词放在一个列表，用空格来切割，然后计数
#     print(temp2)
#     return temp2[temp2 > num]  # 留下重复数大于10 的单词
#
# data_teaching = data_qustop.loc[lab1 == '教育']
# data_physical = data_qustop.loc[lab1 == '体育']
# data_healthy = data_qustop.loc[lab1 == '健康']
# data_tour = data_qustop.loc[lab1 == '旅游']
# print(data_teaching)
#
# data_t = cipin(data_teaching, num=20)
# data_p = cipin(data_physical, num=20)
# data_h = cipin(data_healthy, num=20)
# data_to= cipin(data_tour, num=20)
#
# # 绘制词云图
# back_pic = imageio.imread('../data/background.jpg')
# wc = WordCloud(font_path='C:/Windows/Fonts/simkai.ttf',  # 字体
#                background_color='white',  # 背景颜色
#                max_words=2000,  # 最大词数
#                mask=back_pic,  # 背景图片
#                max_font_size=200,  # 字体大小
#                random_state=1234)  # 设置多少种随机的配色方案
# #绘制教育新闻词云图
# wordcloud1 = wc.fit_words(data_t)
# plt.figure(figsize=(16, 8))
# plt.imshow(wordcloud1)
# plt.axis('off')
# plt.savefig('../tmp/教育.jpg')
# plt.show()
#
# # 绘制体育新闻词云图
# wordcloud2 = wc.fit_words(data_p)
# plt.figure(figsize=(16, 8))
# plt.imshow(wordcloud2)
# plt.axis('off')
# plt.savefig('../tmp/体育.jpg')
# plt.show()
#
# # 绘制健康新闻词云图
# wordcloud3 = wc.fit_words(data_h)
# plt.figure(figsize=(16, 8))
# plt.imshow(wordcloud3)
# plt.axis('off')
# plt.savefig('../tmp/健康.jpg')
# plt.show()
#
# # 绘制教育旅游词云图
# wordcloud4 = wc.fit_words(data_to)
# plt.figure(figsize=(16, 8))
# plt.imshow(wordcloud4)
# plt.axis('off')
# plt.savefig('../tmp/旅游.jpg')
# plt.show()
#
#
# # 代码4-4
# num = 400
# adata = data_teaching.sample(num, random_state=5,replace = True)
# bdata = data_physical.sample(num, random_state=5,replace = True)
# cdata = data_healthy.sample(num, random_state=5,replace = True)
# ddata = data_tour.sample(num, random_state=5,replace = True)
# data_sample = pd.concat([adata, bdata,cdata,ddata])
#
# data = data_sample.apply(lambda x: ' '.join(x))
# lab = pd.DataFrame(['教育'] * num + ['体育'] * num+['健康']*num+['旅游']*num, index=data.index)
# my_data = pd.concat([data, lab], axis=1)
# my_data.columns = ['news', 'label']
#
#
# # 代码4-5
# # 划分训练集和测试集
# x_train, x_test, y_train, y_test = train_test_split(
#     my_data.news, my_data.label, test_size=0.2, random_state=123)  # 构建词频向量矩阵
# # 训练集
# cv = CountVectorizer()  # 将文本中的词语转化为词频矩阵
# train_cv = cv.fit_transform(x_train)  # 拟合数据，再将数据转化为标准化格式
# train_cv.toarray()
# train_cv.shape  # 查看数据大小
# cv.vocabulary_  # 查看词库内容
#
#
# # 测试集
# cv1 = CountVectorizer(vocabulary=cv.vocabulary_)
# test_cv = cv1.fit_transform(x_test)
# test_cv.shape
# # 朴素贝叶斯
# nb = MultinomialNB()   # 朴素贝叶斯分类器
# nb.fit(train_cv, y_train)   # 训练分类器
# pre = nb.predict(test_cv)  # 预测nb = MultinomialNB()
#
#
# # 代码4-6
# # 评价
# cm = confusion_matrix(y_test, pre)
# cr = classification_report(y_test, pre)
# print(cm)
# print(cr)


# 代码4-7
import re
import os
import json
import jieba
import pandas as pd
from sklearn.cluster import KMeans
import joblib
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.feature_extraction.text import CountVectorizer
import  random
from sklearn import metrics

# 数据读取
# Load the raw news data: two unnamed columns (text, category label).
data = pd.read_csv('../data/new.csv', encoding='utf-8', header=None)
data.columns = ['news', 'label']

# Drop exact duplicate rows.
data_dup = data.drop_duplicates()

# De-identification: the source text uses the character 'x' as a masking
# marker; strip every occurrence from the news text.
data_qumin = data_dup.copy()
data_qumin['news'] = data_dup['news'].astype('str').apply(lambda s: s.replace('x', ''))


# Code listing 4-8 (代码4-8)
# Text preprocessing: stop words, user dictionary, tokenisation, filtering.

# Stop-word list; 'ooo' never occurs in the file, so each line is read as
# a single field.  A space entry is prepended because jieba emits spaces
# as tokens.
stopword = pd.read_csv('../data/stopword.txt', sep='ooo', encoding='gbk',
                       header=None, engine='python')
stopword = [' '] + list(stopword[0])

# BUG FIX: jieba.load_userdict() expects a file path (or file-like
# object), not a DataFrame -- the original passed the result of
# pd.read_csv().  Pass the path directly (as the commented listing above
# also does).
dict_file = '../data/newdic1.txt'
jieba.load_userdict(dict_file)

# Tokenise each news item, then remove stop words.
data_cut = data_qumin.copy()
data_cut['news'] = data_qumin['news'].astype('str').apply(lambda x: list(jieba.cut(x)))
data_cut['news'] = data_cut['news'].apply(lambda x: [i for i in x if i not in stopword])

# Keep only the Chinese characters of each token, then drop tokens that
# are a single character (or became empty).
data_cut['news'] = data_cut['news'].apply(
    lambda x: [''.join(re.findall(r'[\u4e00-\u9fa5]', i)) for i in x])
data_cut['news'] = data_cut['news'].apply(lambda x: [i for i in x if len(i) > 1])

# Drop documents whose token list ended up with fewer than two tokens.
# .copy() avoids a SettingWithCopyWarning on the label assignment below.
ind = [len(i) > 1 for i in data_cut.iloc[:, 0]]
data_qustop = data_cut.loc[ind, :].copy()

# Map the Chinese category names to numeric string labels.
reps = {'教育': '1', '体育': '2', '健康': '3', '旅游': '4'}
data_qustop['label'] = data_qustop['label'].map(lambda x: reps.get(x))

# Join every token list into a space-separated document string.
corpus = [' '.join(tokens) for tokens in data_qustop.iloc[:, 0]]

# Fixed split: the first 1600 documents train, the remainder test.
train_corpus = corpus[:1600]
test_corpus = corpus[1600:]


# Code listing 4-9 (代码4-9)
# Bag-of-words counts: element a[i][j] is the frequency of term j in
# document i.
vectorizer = CountVectorizer()
# TF-IDF weighting on top of the raw counts.
transformer = TfidfTransformer()
# Fit the vocabulary and the IDF statistics on the training corpus only.
train_tfidf = transformer.fit_transform(vectorizer.fit_transform(train_corpus))
# BUG FIX: the test corpus must be projected with transform(), not
# fit_transform() -- refitting builds a different vocabulary and IDF
# table, so the resulting matrix would live in a feature space
# incompatible with the training matrix (and with any model trained on it).
test_tfidf = transformer.transform(vectorizer.transform(test_corpus))
# Dense weight matrices: w[i][j] is the tf-idf weight of term j in
# document i.
train_weight = train_tfidf.toarray()
test_weight = test_tfidf.toarray()


# Code listing 4-10 (代码4-10)
# Cluster the training tf-idf matrix with K-Means, four clusters
# (one per news category).
# NOTE(review): no random_state is set, so results vary between runs.
clf = KMeans(n_clusters=4, algorithm='elkan')
clf.fit(train_weight)
# Report the four learned centroids.
print('4个中心点为:' + str(clf.cluster_centers_))
# Persist the fitted model to disk.
joblib.dump(clf, 'km.pkl')
# Cluster-size counts for the training assignments.
train_res = pd.Series(clf.labels_).value_counts()
# Predicted cluster per training document.
labels_pred = clf.labels_
# Ground-truth category labels for the same 1600 documents.
labels_true = data_qustop['label'][:1600]

print('\n训练集ARI为：' + str(metrics.adjusted_rand_score(labels_true, labels_pred)))
print('\n训练集AMI为：' + str(metrics.adjusted_mutual_info_score(labels_true, labels_pred)))
print('\n训练集调和平均为：' + str(metrics.v_measure_score(labels_true, labels_pred)))
print('每个样本所属的簇为')
for sample_idx, cluster_id in enumerate(clf.labels_):
    print(sample_idx, ' ', cluster_id)


# Code listing 4-11 (代码4-11)
# Predicted clusters for the test documents.
# NOTE(review): fit_predict() re-fits K-Means from scratch on the test
# matrix instead of assigning test documents to the clusters learned on
# the training set; clf.predict() would be the usual choice, but the test
# matrix above is built from a separately fitted vocabulary, so its
# feature space does not match the trained model -- confirm intent.
labels_pred = clf.fit_predict(test_weight)
# Ground-truth labels for the documents after the 1600-row split point.
labels_true = data_qustop['label'][1600:]
print('\n测试集ARI为：' + str(metrics.adjusted_rand_score(labels_true, labels_pred)))
print('\n测试集AMI为：' + str(metrics.adjusted_mutual_info_score(labels_true, labels_pred)))
print('\n测试集调和平均为：' + str(metrics.v_measure_score(labels_true, labels_pred)))

'''
i = 13513
i+=1
s = i*5-5
e = i*5
print(t[s:e])
'''


def PCA(weight, dimension):
    """Reduce *weight* to *dimension* columns with scikit-learn's PCA.

    weight    : 2-D array-like, one row per document.
    dimension : number of principal components to keep.
    Returns the transformed (n_samples, dimension) array.
    """
    # Shadows this function's own name, but only inside the local scope.
    from sklearn.decomposition import PCA

    print('原有维度: ', len(weight[0]))
    print('开始降维:')

    reducer = PCA(n_components=dimension)
    reduced = reducer.fit_transform(weight)
    print('现在维度: ', len(reduced[0]))

    return reduced

# Reduce the training tf-idf matrix to 300 dimensions before clustering.
tttt = PCA(train_weight, 300)

# K-Means on the reduced features (re-binds clf, replacing the model
# trained on the full-dimensional matrix above).
clf = KMeans(n_clusters=4, init='k-means++')
clf.fit(tttt)


# Birch clustering on the same reduced features.
from sklearn.cluster import Birch
clusterer = Birch(n_clusters=4, threshold=0.5)
# fit_predict() both fits the model and returns the cluster labels; the
# original called fit() immediately before it, which just fitted the
# same model twice, so the redundant fit() call has been removed.
labels_pred = clusterer.fit_predict(tttt)



 # 4个中心点
# Four learned centroids of the PCA-space K-Means model.
print('4个中心点为:' + str(clf.cluster_centers_))
# Persist the fitted model (overwrites the earlier km.pkl).
joblib.dump(clf, 'km.pkl')
# Cluster-size counts for the training assignments.
train_res = pd.Series(clf.labels_).value_counts()
# BUG FIX: the original called clf.fit_predict(tttt) here, which re-fits
# K-Means a second time with a fresh random initialisation, so the
# resulting labels could disagree with the centroids and counts printed
# just above.  The model is already fitted on tttt; reuse its training
# assignments instead.
labels_pred = clf.labels_
# Ground-truth labels for the 1600 training documents.
labels_true = data_qustop['label'][:1600]

print('\n训练集ARI为：' + str(metrics.adjusted_rand_score(labels_true, labels_pred)))
print('\n训练集AMI为：' + str(metrics.adjusted_mutual_info_score(labels_true, labels_pred)))
print('\n训练集调和平均为：' + str(metrics.v_measure_score(labels_true, labels_pred)))
print('每个样本所属的簇为')
for i in range(len(clf.labels_)):
    print(i, ' ', clf.labels_[i])
