# -*-coding: utf-8 -*-
'''
Created on 2018-8-8

@author: xubaifu

词云制作，情感分析

'''
import codecs
import jieba

import jieba.posseg as pseg
import jieba.analyse


from scipy.misc import imread
from wordcloud import ImageColorGenerator
# from os import path

import matplotlib.pyplot as plt
from wordcloud import WordCloud

import numpy as np
from snownlp import SnowNLP
from snownlp import sentiment
# Path of the stop-word list consulted by File_Review.cut_words().
stop_words = 'txt/stopWords.txt'
# Load the stop words once at import time.  A context manager closes the
# file handle (the original codecs.open(...) call leaked it).
with open(stop_words, 'r', encoding='utf8') as _stop_file:
    stopwords = [w.strip() for w in _stop_file]
# POS tags to discard after jieba tagging: punctuation, conjunction,
# auxiliary word, adverb, preposition, time morpheme, particle 'de',
# numeral, locative, pronoun, auxiliary 'le'.
stop_flag = ['x', 'c', 'u', 'd', 'p', 't', 'uj', 'm', 'f', 'r', 'ul']

# Extra user-defined vocabulary so jieba keeps domain terms intact.
jieba.load_userdict("txt/userdict.txt")


class File_Review:
    """Tokenize a text file with jieba, count word frequencies and
    render the result as a word cloud."""

    def load_stopwords(self, path='txt/stopwords.txt'):
        """Load a stop-word list (one word per line, UTF-8 encoded).

        Blank lines are dropped.  Returns a frozenset of words.

        NOTE(review): the default path differs in case from the
        module-level ``stop_words`` ('txt/stopWords.txt') — confirm which
        file is intended on case-sensitive filesystems.
        """
        with open(path, 'r', encoding='UTF-8') as f:
            return frozenset(line.strip() for line in f if line.strip())

    def cut_words(self, filename):
        """POS-tag the text in *filename* and filter it.

        Keeps words longer than one character whose POS tag is not in the
        module-level ``stop_flag`` list and which are not in the
        module-level ``stopwords`` list.  Also prints the text's TF-IDF
        keywords for inspection.  Returns the kept words as a list.
        """
        with open(filename, 'r', encoding='UTF-8') as f:
            text = f.read()
        result = [word for word, flag in pseg.cut(text)
                  if flag not in stop_flag
                  and word not in stopwords
                  and len(word) > 1]

        # TF-IDF keyword extraction (printed for inspection only).
        print(jieba.analyse.extract_tags(text))

        return result

    def all_list(self, arr):
        """Return a {item: occurrence count} dict for *arr*.

        Single linear pass; the original called ``arr.count(i)`` once per
        unique element, which is O(n * k) and quadratic in the worst case.
        """
        result = {}
        for item in arr:
            result[item] = result.get(item, 0) + 1
        return result

    def draw_wordcloud(self, txt):
        """Render and show a word cloud from a {word: frequency} mapping."""
        # Background picture — only used when the ``mask=`` option below is
        # re-enabled.  plt.imread replaces scipy.misc.imread, which was
        # removed from SciPy in 1.2.
        bj_pic = plt.imread('C:\\Users\\xubaifu\\Desktop\\Figure_1.png')
        wc1 = WordCloud(
            # mask=bj_pic,
            background_color="white",
            width=1000,
            height=860,
            # A CJK-capable font is required; without it Chinese characters
            # render as empty boxes.
            font_path="C:\\Windows\\Fonts\\STFANGSO.ttf",
            margin=2)
        # Build the cloud directly from the frequency table.
        wc2 = wc1.fit_words(txt)

        plt.imshow(wc2)
        plt.axis("off")
        plt.show()

    
if __name__ == '__main__':
    reviewer = File_Review()
    # Tokenize the article and drop stop words / unwanted POS tags.
    tokens = reviewer.cut_words('txt/lufax.txt')
    # Count how often each kept token occurs.
    freq = reviewer.all_list(tokens)
    # Keep only tokens that occur at least 5 times.
    freq = {word: count for word, count in freq.items() if count >= 5}
    # Render the word cloud from the frequency table.
    reviewer.draw_wordcloud(freq)

