# coding:utf-8
from BigDataWeb.algorithm import Algorithm
import jieba
import jieba.posseg as pseg
from jieba.analyse import textrank
import os
import wordcloud


class TextSegmentation(Algorithm):
    """Chinese word-segmentation algorithm step.

    Segments ``text`` with jieba (plain cut, POS-tagged cut, and TextRank
    keyword extraction) and renders a word cloud of the nouns/verbs to
    ``word_cloud_path``.
    """

    # Class-level defaults kept for backward compatibility; real values are
    # (re)assigned per instance in __init__ so instances never share the
    # same mutable list objects.
    text = ""
    custom_words = []
    seg_list = []
    words = []
    key_words = []
    word_cloud_path = ""

    # POS tags retained for the word cloud: noun variants (n, nr, ns, nt, nz)
    # and verbs (v, vn). frozenset gives O(1) membership tests.
    _WORD_CLOUD_FLAGS = frozenset({"n", "nr", "ns", "nt", "nz", "v", "vn"})

    def __init__(self):
        Algorithm.__init__(self)
        # Per-instance state. Previously these were only declared as mutable
        # class attributes, so e.g. custom_words.append(...) on one instance
        # leaked into every other instance.
        self.text = ""
        self.custom_words = []
        self.seg_list = []
        self.words = []
        self.key_words = []
        self.algorithm_name = "中文分词"
        self.ipynb_template_name = "text_segmentation-template.ipynb"
        # work_folder_path is provided by Algorithm.__init__ — presumably a
        # per-run scratch directory; TODO confirm against the base class.
        self.word_cloud_path = os.path.join(self.work_folder_path, "word_cloud.jpg")

    def implent(self):
        """Run segmentation, POS tagging, keyword extraction and word cloud.

        Side effects: populates seg_list / words / key_words and writes the
        word-cloud image to ``self.word_cloud_path``.
        """
        Algorithm.implent(self)
        # Register user-supplied vocabulary before cutting.
        for custom_word in self.custom_words:
            jieba.add_word(custom_word)
        self.seg_list = jieba.cut(sentence=self.text, cut_all=False)
        # Materialize the POS-tagged result: pseg.cut returns a one-shot
        # generator, and we need to iterate it again below (and callers may
        # iterate self.words more than once). This also avoids segmenting
        # the same text a second time for the word cloud.
        self.words = list(pseg.cut(sentence=self.text))
        self.key_words = textrank(sentence=self.text, topK=10, withWeight=True)

        # Word cloud built from nouns and verbs only.
        word_cloud = wordcloud.WordCloud(width=800, height=600, font_path="msyh.ttf", background_color="white")
        word_list = [word for word, flag in self.words if flag in self._WORD_CLOUD_FLAGS]
        word_cloud.generate(" ".join(word_list))
        word_cloud.to_file(self.word_cloud_path)

    def prepareIpynbItems(self):
        """Fill the notebook-template placeholders for this algorithm."""
        Algorithm.prepareIpynbItems(self)
        self.ipynb_items["#text#"] = self.text
        self.ipynb_items["#custom_words#"] = self.custom_words
