# -*- coding: utf-8 -*-
# File notes: the official jieba demo is in wordcloud_demo() at the end of this file
# @Time    : 2019/11/19 15:12
# @Author  : hwx
# @Email   : @163.com
# @File    : 词云.py
# @Software: PyCharm
# 结巴中文分词网址:https://github.com/fxsjy/jieba
import jieba
import os

from wordcloud import wordcloud
imgfolder = 'img/'  # 图片目录


def stopwordslist():
    """
    Load the jieba stop-word filter list.

    :return: list of stop words, one per line of ``textfiles/stopword.txt``
             (UTF-8), with surrounding whitespace stripped.
    """
    # Use a context manager so the handle is closed even on error —
    # the original called open(...).readlines() and leaked the file object.
    with open('textfiles/stopword.txt', 'r', encoding='utf-8') as f:
        return [line.strip() for line in f]


def str_analyse(str):
    """
    Generate a word-cloud image from a single string.

    The string is segmented with jieba, joined by commas, rendered as a
    1000x600 word cloud and written to ``img/<input>.png``.

    :param str: text to analyse. NOTE: the parameter name shadows the
                builtin ``str``; it is kept only for backward compatibility
                with existing callers and aliased immediately below.
    """
    text = str  # stop shadowing the builtin inside the body
    print('----str_analyse函数----')
    segmented = ','.join(jieba.lcut(text))
    print(segmented)
    cloud = wordcloud.WordCloud(
        width=1000,
        height=600,
        font_path='simsun.ttc')  # font with CJK glyph coverage
    cloud.generate(segmented)
    # NOTE(review): using the raw input as the file name breaks for long
    # strings or strings containing path separators — consider sanitising.
    cloud.to_file(imgfolder + text + '.png')
    print(text, '：词云输出成功')


# str_analyse('参数为一段字符串的词云分析')


def txt_analyse(filename):
    """
    Generate a word-cloud image from a UTF-8 text file.

    Reads the file, removes the classical-Chinese particle '曰', segments
    the text with jieba, drops single-character tokens and stop words, and
    renders a 1000x600 word cloud to ``img/<basename>.png``.

    :param filename: path to the UTF-8 encoded input text file.
    """
    import re

    # os.path handles any extension length and both path separators;
    # the original hard-coded filename.split('/')[-1][:-4].
    base = os.path.splitext(os.path.basename(filename))[0]
    outfilename = base + '.png'
    print(outfilename)
    with open(filename, 'r', encoding='UTF-8') as f:
        text = f.read()
    print(text[:100])  # preview of the source text
    tokens = jieba.lcut(re.sub('曰', '', text))
    # Set membership is O(1); the original scanned the stop-word list per token.
    stopwords = set(stopwordslist())
    kept = [word for word in tokens
            if len(word) > 1 and word not in stopwords]
    joined = ','.join(kept)
    w = wordcloud.WordCloud(
        width=1000,
        height=600,
        font_path='simsun.ttc')  # font with CJK glyph coverage
    w.generate(joined)
    w.to_file(imgfolder + outfilename)
    print(outfilename, '输出成功')


# Run the analysis only when executed as a script, not when this module
# is imported for its functions.
if __name__ == '__main__':
    txt_analyse('textfiles/三国演义.txt')


def wordcloud_demo():
    """
    Official jieba usage demo: cut modes, dictionary tuning, keyword
    extraction (TF-IDF / TextRank), POS tagging and token positions.
    """
    # jieba.analyse and jieba.posseg are submodules that are NOT loaded by
    # a plain ``import jieba``; without these imports the attribute access
    # below raises AttributeError.
    import jieba.analyse
    import jieba.posseg

    print('=' * 40)
    print('1. 分词')
    print('-' * 40)

    seg_list = jieba.cut("我来到北京清华大学", cut_all=True)
    print("Full Mode: " + "/ ".join(seg_list))  # full mode

    seg_list = jieba.cut("我来到北京清华大学", cut_all=False)
    print("Default Mode: " + "/ ".join(seg_list))  # accurate (default) mode

    seg_list = jieba.cut("他来到了网易杭研大厦")
    print(", ".join(seg_list))

    seg_list = jieba.cut_for_search("小明硕士毕业于中国科学院计算所，后在日本京都大学深造")  # search-engine mode
    print(", ".join(seg_list))

    print('=' * 40)
    print('2. 添加自定义词典/调整词典')
    print('-' * 40)

    print('/'.join(jieba.cut('如果放到post中将出错。', HMM=False)))
    # 如果/放到/post/中将/出错/。
    # suggest_freq(segment, tune=True) adjusts a word's frequency so that
    # it can (or cannot) be produced as a single token.
    print(jieba.suggest_freq(('中', '将'), True))
    # 494
    print('/'.join(jieba.cut('如果放到post中将出错。', HMM=False)))
    # 如果/放到/post/中/将/出错/。
    print('/'.join(jieba.cut('「台中」正确应该不会被切开', HMM=False)))
    # 「/台/中/」/正确/应该/不会/被/切开
    print(jieba.suggest_freq('台中', True))
    # 69
    print('/'.join(jieba.cut('「台中」正确应该不会被切开', HMM=False)))
    # 「/台中/」/正确/应该/不会/被/切开

    print('=' * 40)
    print('3. 关键词提取')
    print('-' * 40)
    print(' TF-IDF')
    print('-' * 40)

    s = "此外，公司拟对全资子公司吉林欧亚置业有限公司增资4.3亿元，增资后，吉林欧亚置业注册资本由7000万元增加到5亿元。吉林欧亚置业主要经营范围为房地产开发及百货零售等业务。目前在建吉林欧亚城市商业综合体项目。2013年，实现营业收入0万元，实现净利润-139.13万元。"
    for x, w in jieba.analyse.extract_tags(s, withWeight=True):
        print('%s %s' % (x, w))

    print('-' * 40)
    print(' TextRank')
    print('-' * 40)

    for x, w in jieba.analyse.textrank(s, withWeight=True):
        print('%s %s' % (x, w))

    print('=' * 40)
    print('4. 词性标注')
    print('-' * 40)

    words = jieba.posseg.cut("我爱北京天安门")
    for word, flag in words:
        print('%s %s' % (word, flag))

    print('=' * 40)
    print('6. Tokenize: 返回词语在原文的起止位置')
    print('-' * 40)
    print(' 默认模式')
    print('-' * 40)

    result = jieba.tokenize('永和服装饰品有限公司')
    for tk in result:
        print("word %s\t\t start: %d \t\t end:%d" % (tk[0], tk[1], tk[2]))

    print('-' * 40)
    print(' 搜索模式')
    print('-' * 40)

    result = jieba.tokenize('永和服装饰品有限公司', mode='search')
    for tk in result:
        print("word %s\t\t start: %d \t\t end:%d" % (tk[0], tk[1], tk[2]))


# wordcloud_demo()
