# 代码4-2
import os
import re
import jieba
import numpy as np
import pandas as pd
# from scipy.misc import imread
import imageio
import matplotlib.pyplot as plt
from wordcloud import WordCloud
from sklearn.naive_bayes import MultinomialNB
from sklearn.model_selection import train_test_split
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.metrics import confusion_matrix,classification_report

# Work relative to the data directory
os.chdir('../data')
# Load the raw news corpus; the CSV carries no header row
data = pd.read_csv('new.csv', encoding='utf-8', header=None)
data.columns = ['news', 'label']
# Inspect how many articles each category has
data.label.value_counts()

# Code listing 4-3
# Data preprocessing
temp = data.news
# Count missing values in the news column
temp.isnull().sum()

# Remove duplicate articles
data_dup = temp.drop_duplicates()
# De-identification: the corpus masks sensitive characters with the letter 'x';
# strip those 'x' characters (substitute the empty string, not a space)
l1 = data_dup.astype('str').apply(len).sum()
data_qumin = data_dup.astype('str').apply(lambda text: re.sub('x', '', text))
l2 = data_qumin.astype('str').apply(len).sum()
print('减少了' + str(l1 - l2) + '个字符')
# Register a custom user dictionary so jieba keeps domain-specific terms intact
current_dir = os.path.abspath('.')  # absolute path of the current working directory
print(current_dir)
dict_file = os.path.join(current_dir, 'newdic1.txt')
jieba.load_userdict(dict_file)
# Tokenize: segment every article into a list of words with jieba
data_cut = data_qumin.astype('str').apply(lambda text: list(jieba.cut(text)))

# Remove stop words.
# sep='ooo' never occurs in the file, so each line is read as a whole token.
stopword = pd.read_csv('stopword.txt', sep='ooo', encoding='gbk',
                       header=None, engine='python')
stopword = [' '] + list(stopword[0])  # first column as a list, plus the space character
# Membership tests against a list are O(n) per token (quadratic overall);
# a set gives O(1) lookups with identical results.
stopword_set = set(stopword)
l3 = data_cut.astype('str').apply(len).sum()
# Keep only tokens that are not stop words (drops punctuation/noise characters)
data_qustop = data_cut.apply(lambda words: [w for w in words if w not in stopword_set])
l4 = data_qustop.astype('str').apply(len).sum()
print('减少了' + str(l3 - l4) + '个字符')

# Discard articles whose token list became empty after stop-word removal
data_qustop = data_qustop.loc[[i for i in data_qustop.index if data_qustop[i] != []]]
# Drop the record at index 1999 (hard-coded outlier in the original script);
# errors='ignore' prevents a KeyError if that row was already filtered out above.
data_qustop.drop(1999, axis=0, inplace=True, errors='ignore')

# Word-frequency statistics
# Collect the label of every row that survived cleaning, aligned on the same index
lab = [data.loc[idx, 'label'] for idx in data_qustop.index]
lab1 = pd.Series(lab, index=data_qustop.index)

def cipin(data_qustop, num=10):
    """Aggregate word counts over a collection of tokenized documents.

    data_qustop: iterable of token lists (one list of words per document).
    num: frequency threshold; only words occurring more than `num` times are kept.
    Returns a pandas Series of counts, sorted in descending order.
    """
    # Flatten all documents into one whitespace-separated corpus string
    corpus = ' '.join(' '.join(words) for words in data_qustop)
    # Split back into tokens and count occurrences of each distinct word
    counts = pd.Series(corpus.split()).value_counts()
    return counts[counts > num]

# Partition the cleaned corpus by news category
data_teaching = data_qustop[lab1 == '教育']
data_physical = data_qustop[lab1 == '体育']
data_healthy = data_qustop[lab1 == '健康']
data_tour = data_qustop[lab1 == '旅游']
print(data_teaching)

# Per-category word frequencies, keeping only words seen more than 20 times
data_t = cipin(data_teaching, num=20)
data_p = cipin(data_physical, num=20)
data_h = cipin(data_healthy, num=20)
data_to = cipin(data_tour, num=20)

# Word-cloud visualisation
back_pic = imageio.imread('../data/background.jpg')
wc = WordCloud(font_path='C:/Windows/Fonts/simkai.ttf',  # Chinese-capable font
               background_color='white',
               max_words=2000,                            # cap on rendered words
               mask=back_pic,                             # shape the cloud like the image
               max_font_size=200,
               random_state=1234)                         # reproducible colour scheme
# Render the education-news word cloud from its frequency Series
wordcloud1 = wc.fit_words(data_t)
plt.figure(figsize=(16, 8))
plt.imshow(wordcloud1)
plt.axis('off')
plt.savefig('../tmp/教育.jpg')
plt.show()



# Code listing 4-4
# Build a balanced dataset: `num` articles per category, sampled with
# replacement (some categories hold fewer than `num` cleaned articles).
num = 400
adata = data_teaching.sample(num, random_state=5, replace=True)
bdata = data_physical.sample(num, random_state=5, replace=True)
cdata = data_healthy.sample(num, random_state=5, replace=True)
ddata = data_tour.sample(num, random_state=5, replace=True)
data_sample = pd.concat([adata, bdata, cdata, ddata])
print('shape:', data_sample.shape)
# Join each token list back into one whitespace-separated string per article,
# as required by CountVectorizer.
# NOTE(review): the original rebound the module-level names `data` and `lab`
# here, clobbering the raw DataFrame; fresh names avoid that shadowing while
# producing an identical `my_data`.
news_text = data_sample.apply(lambda words: ' '.join(words))
labels = pd.DataFrame(['教育'] * num + ['体育'] * num + ['健康'] * num + ['旅游'] * num,
                      index=news_text.index)
my_data = pd.concat([news_text, labels], axis=1)
print(my_data.shape)
my_data.columns = ['news', 'label']

# Code listing 4-5
# Split into training and test sets (80/20, fixed seed for reproducibility)
x_train, x_test, y_train, y_test = train_test_split(
    my_data.news, my_data.label, test_size=0.2, random_state=123)

# Build the term-frequency matrix from the training texts
cv = CountVectorizer()  # converts texts into a sparse word-count matrix
train_cv = cv.fit_transform(x_train)  # learn the vocabulary and vectorize
print(train_cv)
print(train_cv.shape)  # (n_train_documents, vocabulary_size)
# print(cv.vocabulary_)  # inspect the learned vocabulary

# Vectorize the test set with the SAME fitted vectorizer: cv.transform reuses
# the training vocabulary directly, instead of constructing a second
# CountVectorizer(vocabulary=cv.vocabulary_) and re-fitting it.
test_cv = cv.transform(x_test)

# Multinomial naive Bayes classifier on word counts
nb = MultinomialNB()
nb.fit(train_cv, y_train)   # train on the count matrix
pre = nb.predict(test_cv)   # predicted labels for the test set


# Code listing 4-6
# Model evaluation
cm = confusion_matrix(y_test, pre)       # confusion matrix over the four classes
cr = classification_report(y_test, pre)  # precision / recall / F1 per class
print(cm)
print(cr)



