import jieba
import requests
from lxml import html
import time
from wordcloud import WordCloud
import jieba
import matplotlib.pyplot as plt
import collections
import pkuseg
import os
from apscheduler.schedulers.background import BlockingScheduler

# Blocking scheduler for the daily job.  NOTE: `misfire_grace_time` is not a
# scheduler constructor option in APScheduler — unknown kwargs are silently
# ignored — so it must be supplied through `job_defaults` to actually apply.
sched = BlockingScheduler(job_defaults={'misfire_grace_time': 5})


def func_main():
    """Scrape today's Xinwen Lianbo transcript, show a word cloud for it,
    and print the 20 most frequent tokens.

    Side effects: performs an HTTP GET, reads stopword files from a local
    directory, opens a matplotlib window, and prints to stdout.
    """

    def get_stopwords(stopwords_path):
        """Return the union of stopwords from every file in *stopwords_path*.

        Each file is read as UTF-8, one stopword per line.
        """
        stopwords = set()
        for filename in os.listdir(stopwords_path):
            with open(os.path.join(stopwords_path, filename), "r",
                      encoding="utf-8") as f:
                # Read each file exactly once.  The original combined
                # `for line in f` with `f.read()` inside the loop, which
                # silently dropped the first line of every file.
                stopwords |= set(f.read().splitlines())
        return stopwords

    def create_wordcloud(text, stop_words, max_words):
        """Segment *text* with pkuseg, display a word cloud, and return the
        space-joined segmented text.

        :param text: raw transcript text to segment and visualise
        :param stop_words: set of words the WordCloud should exclude
        :param max_words: maximum number of words rendered in the cloud
        """
        seg = pkuseg.pkuseg()
        # Segment the *text* parameter.  The original read the enclosing
        # variable `result` via closure, leaving the parameter dead.
        words = " ".join(seg.cut(text))

        wordcloud = WordCloud(
            font_path='simhei.ttf',
            background_color='white',
            width=1600,
            height=1600,
            min_font_size=10,
            max_font_size=200,
            max_words=max_words,
            stopwords=stop_words
        ).generate(words)
        plt.imshow(wordcloud)
        plt.axis("off")
        plt.show()
        return words

    # Today's transcript page, e.g. .../xinwenlianbo/20240101/
    URL = "https://cn.govopendata.com/xinwenlianbo/{}/".format(
        time.strftime("%Y%m%d", time.localtime()))
    page = requests.get(URL)
    tree = html.fromstring(page.content)

    # Concatenate consecutive <p> paragraphs; stop at the first missing index.
    result = ""
    for i in range(1, 100):
        xpath = '/html/body/div[2]/div[4]/div[1]/p[{}]/text()'.format(i)
        content = tree.xpath(xpath)
        if not content:
            break
        result += content[0]

    stopwords_path = r'C:\Users\l\Desktop\爬虫\stopwords/'
    stopwords = get_stopwords(stopwords_path)
    result = create_wordcloud(result, stopwords, 30)

    # Re-tokenise the (already space-joined) text with jieba for the count.
    words = jieba.lcut(result)
    counter = collections.Counter(words)
    most_common_words = counter.most_common(20)
    print(most_common_words)


# Run func_main every day at 20:30:00.  Cron fields accept plain ints, so
# the original str() wrappers were unnecessary.  misfire_grace_time=5 lets
# a run start up to 5 seconds late before being skipped.
sched.add_job(func_main, 'cron', hour=20, minute=30, second=0,
              id='t1', misfire_grace_time=5)

sched.start()
