#!/usr/bin/env python
# encoding: utf-8
'''
@author: ZhouXin
@time: 2018/6/13 0013 下午 17:43
'''

import jieba  # 分词包
import numpy
import codecs
import pandas
import matplotlib.pyplot as plot
from wordcloud import WordCloud, ImageColorGenerator
import os
from scipy.misc import imread

# Read the novel's full text (the .txt is GB18030-encoded).
# Use a context manager so the handle is closed even on error,
# and avoid shadowing the builtin name `file`.
with codecs.open(u'《梦里花落知多少》三毛.txt', 'rb', encoding='gb18030') as novel_file:
    content = novel_file.read()

# Segment the text with jieba, keeping only tokens longer than one
# character and dropping the '\r\n' line-break artifacts.
words = jieba.cut(content)
segment = [word for word in words if len(word) > 1 and word != '\r\n']

# Remove stop words so they do not dominate the word cloud.
words_df = pandas.DataFrame({'segment': segment})
stopwords = pandas.read_csv("stopwords.txt",
                            index_col=False,
                            quoting=3,
                            sep="\t",
                            names=['stopword'],
                            encoding='utf-8')
# The original looped over every word and rebuilt list(stopwords.stopword)
# on each iteration -- O(words * stopwords).  Build the stop-word set once
# and filter with a vectorized isin, then renumber rows from 0 to match
# the DataFrame the original rebuilt from the filtered list.
stopword_set = set(stopwords.stopword)
words_df = words_df[~words_df.segment.isin(stopword_set)].reset_index(drop=True)

# Count how often each word occurs.  The original
# `groupby(...).agg({'计数': numpy.size})` uses the "nested renamer" form,
# which raises SpecificationError in pandas >= 1.0.  value_counts() gives
# the same counts and is already sorted by frequency (descending), which
# is what the commented-out sort_values intended.  Result columns:
# ['segment', '计数'].
words_stat = words_df['segment'].value_counts().reset_index()
words_stat.columns = ['segment', '计数']


# Render the basic word cloud: black background, SimHei font so the
# Chinese glyphs display correctly.
# %matplotlib
word_cloud = WordCloud(font_path="simhei.ttf", background_color="black")
# Map each word to its count for the top 7515 rows of the frequency table.
frequencies = dict(words_stat.head(7515).itertuples(index=False))
word_cloud = word_cloud.fit_words(frequencies)
plot.axis('off')
plot.imshow(word_cloud)
plot.show()

# Draw a second cloud using an image as both the mask (shape) and the
# colour source.  scipy.misc.imread was deprecated in SciPy 1.0 and
# removed in 1.2, so read the image via matplotlib instead (already
# imported as `plot`).
# %matplotlib
img = plot.imread('bg_pic.png')
# matplotlib returns PNG pixel data as floats in [0, 1]; WordCloud's mask
# and ImageColorGenerator expect 8-bit values, so rescale when needed.
if img.dtype != numpy.uint8:
    img = (img * 255).astype(numpy.uint8)
word_cloud = WordCloud(background_color="white", mask=img, font_path='simhei.ttf')
word_cloud = word_cloud.fit_words(dict(words_stat.head(4000).itertuples(index=False)))
imgColors = ImageColorGenerator(img)
plot.axis("off")
plot.imshow(word_cloud.recolor(color_func=imgColors))
plot.show()
