import ast
import os
import re

import jieba
import numpy as np
import pandas as pd
# from scipy.misc import imread
import imageio
import matplotlib.pyplot as plt
from wordcloud import WordCloud
from sklearn.naive_bayes import MultinomialNB
from sklearn.model_selection import train_test_split
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.metrics import confusion_matrix, classification_report

# pd.set_option('display.max_row',None)
# pd.set_option("display.max_colwidth", None)

# All data files live under ./data/ relative to the script's start directory.
os.chdir('./data/')

# Each line of the sample files is a Python dict literal such as
# {'text': '...', 'label': 0 or 1}.  Negative samples first, then positive.
data = []
with open('negative_samples.txt', 'r', encoding='utf-8') as f1, \
        open('positive_samples.txt', 'r', encoding='utf-8') as f2:
    for line in list(f1) + list(f2):
        # ast.literal_eval safely parses the dict literal (eval would execute
        # arbitrary code from the file); parse each line once, not twice.
        record = ast.literal_eval(line.strip())
        data.append([record['text'], record['label']])

data = pd.DataFrame(data, columns=['content', 'label'])



# ---------------------------------------------------------------------------
# Pre-processing: dedup -> de-identify -> tokenize -> stop-word removal
# ---------------------------------------------------------------------------
temp = data.content
temp.isnull().sum()  # exploratory null check; result intentionally discarded

# Remove duplicate reviews.
data_dup = temp.drop_duplicates()

# De-identification: the corpus masks sensitive characters with the letter
# 'x'; delete those markers.  Convert to str once and reuse instead of
# calling astype('str') repeatedly.
data_str = data_dup.astype('str')
l1 = data_str.apply(len).sum()
data_qumin = data_str.apply(lambda x: re.sub('x', '', x))
l2 = data_qumin.apply(len).sum()
print('减少了' + str(l1 - l2) + '个字符')

# Load the user dictionary so that domain terms are segmented as one token.
current_dir = os.path.abspath('.')  # absolute path of the current directory
print(current_dir)
dict_file = os.path.join(current_dir, 'newdic1.txt')
jieba.load_userdict(dict_file)

# Word segmentation (data_qumin is already str dtype).
data_cut = data_qumin.apply(lambda x: list(jieba.cut(x)))
print(data_cut)

# Stop-word removal.  sep='ooo' never occurs in the file, so each physical
# line is read as one row.
stopword = pd.read_csv('stopword.txt', sep='ooo', encoding='gbk', header=None,
                       engine='python')
# Treat a lone space as a stop word too; a set gives O(1) membership tests
# instead of scanning a list for every token.
stopword = set([' '] + list(stopword[0]))
l3 = data_cut.astype('str').apply(len).sum()
data_qustop = data_cut.apply(lambda x: [i for i in x if i not in stopword])
l4 = data_qustop.astype('str').apply(len).sum()
print('减少了' + str(l3 - l4) + '个字符')

# Drop documents that became empty after cleaning.
data_qustop = data_qustop.loc[[i for i in data_qustop.index if data_qustop[i] != []]]
# Row 1999 is a known bad sample; errors='ignore' prevents a KeyError when
# that row was already removed by the empty-document filter above.
data_qustop.drop(1999, axis=0, inplace=True, errors='ignore')

# Word-frequency statistics: align the labels with the documents that
# survived cleaning, keyed by the surviving index.
lab1 = pd.Series(
    [data.loc[idx, 'label'] for idx in data_qustop.index],
    index=data_qustop.index,
)
lab = list(lab1)  # plain-list view of the same labels

print("________", data_qustop, "______")
def cipin(data_qustop, num=10):
    """Count word frequencies over tokenized documents.

    Parameters
    ----------
    data_qustop : iterable of list[str]
        Tokenized documents, one word list per document.
    num : int, default 10
        Frequency threshold; only words occurring strictly more than
        ``num`` times are kept.

    Returns
    -------
    pd.Series
        Word -> occurrence count, restricted to counts > num,
        sorted in descending order of count.
    """
    # Flatten every document into one whitespace-separated string, then
    # split and count each token.  (Debug prints that dumped the whole
    # dataset on every call were removed.)
    flattened = ' '.join(' '.join(doc) for doc in data_qustop)
    counts = pd.Series(flattened.split()).value_counts()
    return counts[counts > num]


# Split the cleaned, tokenized reviews by sentiment label.
pos_mask = lab1 == 1
neg_mask = lab1 == 0
data_positive = data_qustop.loc[pos_mask]
data_negative = data_qustop.loc[neg_mask]

# Per-class vocabularies: keep words appearing more than 20 times.
data_p = cipin(data_positive, num=20)
data_n = cipin(data_negative, num=20)


# --- Word-cloud rendering ---------------------------------------------------
back_pic = imageio.v2.imread('./background.jpg')  # mask image: cloud shape
wc = WordCloud(font_path='C:/Windows/Fonts/simkai.ttf',  # CJK-capable font
               background_color='white',
               max_words=2000,
               mask=back_pic,
               max_font_size=200,
               random_state=1234)  # fixed seed -> reproducible colour layout


def _render_cloud(freqs):
    """Fit the shared WordCloud to *freqs* (word -> count) and display it.

    Returns the fitted WordCloud object so callers can keep a reference.
    """
    cloud = wc.fit_words(freqs)
    plt.figure(figsize=(16, 8))
    plt.imshow(cloud)
    plt.axis('off')
    plt.show()
    return cloud


# Positive-review word cloud
wordcloud1 = _render_cloud(data_p)
# Negative-review word cloud
wordcloud4 = _render_cloud(data_n)
