'''
    content: analyse comments with jieba and render them as a word cloud
    author: kktao
'''
import jieba.analyse
import os
from wordcloud import WordCloud
import matplotlib.pyplot as plt
import warnings
warnings.filterwarnings("ignore")

# Directory holding the crawled Weibo comment data, relative to the CWD.
# Components are passed separately to os.path.join so the correct separator
# is used on every OS (the original embedded a Windows-only "\\").
cur_path = os.path.join(os.getcwd(), "weibo_crawl", "data")
# Raw comment text produced by the crawler; read by the analysis functions below.
file_path = os.path.join(cur_path, "result_xpath_kx.txt")


def jieba_analysis():
    """Extract the top-100 TF-IDF keywords from the comment file and print
    each keyword with its (integer-scaled) weight, tab-separated.
    """
    # 'with' guarantees the file is closed even if reading fails,
    # replacing the original manual open/try/finally/close dance.
    with open(file_path, 'r', encoding='utf-8') as file_in:
        content = file_in.read()

    tags = jieba.analyse.extract_tags(content, topK=100, withWeight=True)
    for word, weight in tags:
        # Weights are small fractions; scale by 10000 so the printed
        # value is a readable integer.
        print(word + '\t' + str(int(weight * 10000)))

def stopwordslist():
    """Load the Chinese stopword file (one stopword per line).

    Returns:
        list[str]: stripped stopword strings.
    """
    stopwords_path = os.path.join(cur_path, 'chinsesstoptxt.txt')
    # 'with' closes the handle; the original leaked an open file object.
    # Iterating the file directly also avoids materializing readlines().
    with open(stopwords_path, encoding='UTF-8') as f:
        return [line.strip() for line in f]


def seg_depart(sentence):
    """Segment *sentence* with jieba and drop stopwords.

    Args:
        sentence: raw Chinese text to tokenize.

    Returns:
        str: kept tokens, each followed by a single space (empty string
        when nothing survives filtering) — same format as before.
    """
    print("正在分词")
    # jieba.cut yields tokens lazily.
    words = jieba.cut(sentence.strip())
    # A set gives O(1) membership tests; the original scanned a list
    # for every token.
    stopwords = set(stopwordslist())
    # join avoids the original quadratic `outstr += ...` accumulation
    # while producing a byte-identical result (trailing space included).
    return "".join(
        word + " "
        for word in words
        if word not in stopwords and word != '\t'
    )

def wordcloud_analysis():
    """Build a word cloud from the comment file and save it as a PNG
    (sg8.png) in the data directory.
    """
    # 'with' closes the file; the original left the handle open.
    with open(file_path, encoding="UTF-8") as f:
        text = f.read()
    # Tokenize and strip stopwords before rendering.
    text = seg_depart(text)

    # Create the canvas.
    plt.figure(figsize=(10, 8), dpi=80)
    # CJK-capable font; NOTE(review): Windows-only path — parameterize if
    # this ever needs to run elsewhere.
    font = r"C:\Windows\Fonts\simfang.ttf"
    # Render the word cloud with a transparent background.
    wc = WordCloud(font_path=font, width=800, height=500, scale=2,
                   mode="RGBA", background_color=None).generate(text)
    # Display and persist the image.
    plt.imshow(wc, interpolation="bilinear")
    plt.axis("off")
    plt.savefig(os.path.join(cur_path, "sg8.png"))

if __name__ == "__main__":
    # Entry point: render and save the word-cloud image.
    # (jieba_analysis() is defined above but not invoked here.)
    wordcloud_analysis()
    