# 代码4-2
import os
import re
import jieba
import numpy as np
import pandas as pd
# NOTE: scipy.misc.imread was removed from modern SciPy; imageio.imread below is its replacement.
import imageio
import matplotlib.pyplot as plt
from wordcloud import WordCloud

# Work from the data directory so all relative paths below resolve there.
os.chdir('../data')

# Load the raw news file (no header row): column 0 = article text, column 1 = category label.
data = pd.read_csv('new.csv', header=None, encoding='utf-8')
data.columns = ['news', 'label']
# Quick look at the class distribution.
print(data['label'].value_counts())

# 代码4-3
# Data preprocessing: missing-value check, de-duplication, de-identification.
temp = data.news
print('null:', temp.isnull().sum())

# Remove duplicate articles.
data_dup = temp.drop_duplicates()

# De-identification: strip every literal 'x' (the masking character) from the text.
l1 = data_dup.astype('str').str.len().sum()
data_qumin = data_dup.astype('str').str.replace('x', '', regex=False)
l2 = data_qumin.astype('str').str.len().sum()
print(f'减少了{l1 - l2}个字符')
# 加载自定义词典
# Register the custom dictionary so jieba keeps domain-specific terms as single tokens.
current_dir = os.path.abspath('.')  # absolute path of the working directory
print(current_dir)
jieba.load_userdict(os.path.join(current_dir, 'newdic1.txt'))

# Tokenise every document into a list of words
# (tokens may still include spaces/colons at this point).
data_cut = data_qumin.astype('str').apply(jieba.lcut)
print(data_cut)
# 去停用词
# Remove stop words.
# stopword.txt holds one stop word per line; sep='ooo' is a separator that
# should never occur, so each whole line is read into column 0.
stopword = pd.read_csv('stopword.txt', sep='ooo', encoding='gbk', header=None, engine='python')
stopword = [' '] + list(stopword[0])  # turn column 0 into a list, plus the space token
# Set membership is O(1) per token; testing against the list was an
# accidental O(tokens * stopwords) scan.
stopword_set = set(stopword)
l3 = data_cut.astype('str').apply(lambda x: len(x)).sum()
# Keep only the tokens that are not stop words (this also discards the
# leftover punctuation/garbage characters).
data_qustop = data_cut.apply(lambda x: [i for i in x if i not in stopword_set])
l4 = data_qustop.astype('str').apply(lambda x: len(x)).sum()
print('减少了' + str(l3 - l4) + '个字符')

# Drop rows whose token list became empty.
data_qustop = data_qustop.loc[[i for i in data_qustop.index if data_qustop[i] != []]]
# Hard-coded removal of row 1999 (presumably a known bad record — TODO confirm).
# errors='ignore' keeps this from raising KeyError when that row was already
# removed by the empty-list filter above.
data_qustop.drop(1999, axis=0, inplace=True, errors='ignore')

# 词频统计
lab = [data.loc[i,'label'] for i in data_qustop.index]  # 取出对应的第一列
lab1 = pd.Series(lab,index=data_qustop.index)  # 得到第一列的series

def cipin(data_qustop, num=10):
    """Count word frequencies over a collection of tokenised documents.

    Parameters
    ----------
    data_qustop : iterable of list of str
        Tokenised documents (e.g. the cleaned Series of word lists).
    num : int, optional
        Threshold: only words occurring strictly more than ``num`` times
        are returned (default 10).

    Returns
    -------
    pandas.Series
        word -> count, sorted by count in descending order, restricted to
        counts greater than ``num``.
    """
    # Flatten the token lists directly. The previous join-on-space /
    # re-split round trip would miscount any token that itself contains
    # whitespace (the segmentation step notes tokens may include spaces).
    words = [w for doc in data_qustop for w in doc]
    freq = pd.Series(words).value_counts()
    print(freq)
    return freq[freq > num]

# Split the cleaned corpus by category and keep words seen more than 20 times.
data_teaching = data_qustop[lab1 == '教育']
data_tour = data_qustop[lab1 == '旅游']
print(data_teaching)

data_t = cipin(data_teaching, num=20)
data_to = cipin(data_tour, num=20)

# Render the word cloud for the education-news frequencies.
back_pic = imageio.imread('../data/background.jpg')  # image used as the cloud's shape mask
wc = WordCloud(
    font_path='C:/Windows/Fonts/simyou.ttf',  # Windows font path (needed for CJK glyphs)
    background_color='white',
    max_words=2000,        # cap on the number of rendered words
    mask=back_pic,         # draw words inside the mask silhouette
    max_font_size=200,
    random_state=1234,     # fixed seed -> reproducible layout/colour choices
)

# Education-news cloud: fit to the frequency Series, then display and save.
wordcloud1 = wc.fit_words(data_t)
plt.figure(figsize=(16, 8))
plt.imshow(wordcloud1)
plt.axis('off')
plt.savefig('../tmp/教育.jpg')
plt.show()
