# coding=utf-8
"""
Function：分析抓取的数据，统计词频、生成词云
"""
import json
import re
import jieba
from matplotlib import pyplot as plt

from lxml import etree
from wordcloud import WordCloud

all_txt = ""


def read_file(path="yiliao.json"):
    """Yield one decoded JSON object per non-blank line of *path*.

    The file is expected to be in JSON Lines format (one document per
    line).  Whitespace-only lines are skipped instead of being fed to
    ``json.loads``, which would raise ``JSONDecodeError``.

    :param path: file to read; defaults to the scraped-data dump.
    :return: generator of objects decoded from each line.
    """
    with open(path, encoding='utf-8') as f:
        for line in f:
            line = line.strip()
            # Strip first, then test: the original ``if not line`` never
            # fired because a line read from a file always contains at
            # least its trailing newline.
            if not line:
                continue
            # strict=False tolerates raw control characters embedded in
            # the scraped page text.
            yield json.loads(line, strict=False)


def parse(data):
    """Extract the answer text from one scraped page and append it to the
    module-level ``all_txt`` accumulator.

    :param data: dict whose ``"page"`` key holds the raw HTML of the page.
    """
    global all_txt
    html = data.get("page", "")
    if not html:
        # Nothing to parse; etree.HTML chokes on empty input.
        return
    obj = etree.HTML(html)
    if obj is None:
        # lxml can return None for unparseable markup.
        return
    # NOTE(review): the original also extracted the post title and author
    # (//h1[@class="Post-Title"], //a[@class="UserLink-link"]) but never
    # used them, so those lookups were dropped.
    content = obj.xpath("""//*[@class="Post-RichTextContainer"]//*[contains(@class, "Post-RichText")]""")
    content = get_value(content)
    # get_value returns a list when several nodes match; flatten it so we
    # never embed a Python list repr (e.g. "['a', 'b']") in the text.
    if isinstance(content, list):
        content = "\n".join(content)
    print(content)

    all_txt = f"{all_txt}\t{content}"


def word_count():
    """Tokenize the accumulated text with jieba and render a word cloud.

    Side effects: saves the cloud image to ``title.jpg`` and opens a
    matplotlib window showing it.  Reads the module-level ``all_txt``
    (no ``global`` needed for a read-only access).
    """
    words = jieba.cut(all_txt, cut_all=False)
    # Function words / pronouns to exclude from the cloud.  WordCloud
    # accepts any iterable and converts it to a set internally; a set
    # literal makes the intent explicit.
    stop_words = {
        "的", "地", "我", "我们", "你", "你们", "他", "他们",
        "她", "她们", "它", "它们", "百万",
    }
    word_cut_join = " ".join(words)

    # Generate the word cloud image.
    wc = WordCloud(
        font_path='simsun.ttc',  # font with CJK glyphs
        max_words=100,  # cap on the number of words shown
        # mask=mask_img,  # optional background shape image
        stopwords=stop_words,
        background_color='white'  # background colour
    ).generate(word_cut_join)

    plt.imshow(wc)
    plt.axis('off')  # hide the axes
    plt.savefig('title.jpg')
    plt.show()


def get_value(elem):
    """Collapse a list of lxml elements into text.

    Empty list -> ``""``; a single element -> its stripped text content;
    several elements -> a list of each element's stripped text content.

    :param elem: list of lxml element nodes (as returned by ``xpath``).
    :return: ``str`` for zero or one element, ``list[str]`` otherwise.
    """
    if not elem:
        return ""
    texts = [node.xpath("string(.)").strip() for node in elem]
    return texts[0] if len(texts) == 1 else texts


def main():
    """Drive the pipeline: parse every scraped record, then build the cloud."""
    for record in read_file():
        parse(record)
    word_count()


# Script entry point: run the full analyze-and-visualize pipeline.
if __name__ == '__main__':
    main()
