import jieba
from jieba import analyse
import pandas as pd
import codecs
from wordcloud import WordCloud
import PIL
import numpy as np
import matplotlib.pyplot as plt

# Load the source text and split it into lines.
with open('sheng_content.txt', 'r', encoding='utf-8') as file:
    data = file.read()

datas = data.split('\n')

# Load the stop-word list. Use a context manager so the file handle is
# closed deterministically (the original codecs.open handle was never
# closed), and store the words in a set for O(1) membership tests in the
# tokenization loop below.
with open('stopwords_cn.txt', 'r', encoding='utf-8') as stop_file:
    stopwords = {line.strip() for line in stop_file}

# Accumulates one {'word', 'count'} record per kept token; consumed by
# the frequency-counting step further down.
segments = []

# Tokenize each line with jieba in precise mode (cut_all=False) and keep
# only tokens that are not stop words and are at least two characters
# long — single-character tokens are mostly particles and punctuation.
for content in datas:  # iterate directly instead of range(len(...))
    for word in jieba.lcut(content, cut_all=False):
        if word not in stopwords and len(word) >= 2:
            # Unit count per occurrence; summed per word later.
            segments.append({'word': word, 'count': 1})

# Turn the per-token records into a DataFrame for aggregation.
seg_df = pd.DataFrame(segments)

# Sum the unit counts per distinct word and order by frequency,
# most common first.
result = (
    seg_df.groupby('word')['count']
    .sum()
    .sort_values(ascending=False)
)

# Export the 300 most frequent keywords to a spreadsheet.
result[:300].to_excel('sheng_result.xlsx')
# Render the word cloud, masked to the shape of the heart image.
image_background = PIL.Image.open('heart_shape.png')
MASK = np.array(image_background)

# fit_words expects a plain {word: frequency} mapping; convert the
# pandas Series explicitly instead of relying on its dict-like
# interface working by accident.
wc = WordCloud(font_path='msyh.ttc',  # msyh.ttc = Microsoft YaHei, needed for CJK glyphs
               background_color='white',
               width=4000, height=2000,
               margin=10, max_words=200,
               mask=MASK).fit_words(result[0:300].to_dict())

plt.imshow(wc)
plt.axis('off')  # this is an image, not a chart — hide the axes
plt.show()
wc.to_file('sheng_world.png')
