import os

import requests
from bs4 import BeautifulSoup
import jieba
from wordcloud import WordCloud, ImageColorGenerator
from snownlp import SnowNLP
import matplotlib.pyplot as plt
import time


class Recognition:
    """Scrape an article's paragraph text from a URL, then build a word-cloud
    image and extract summary sentences from it."""

    def __init__(self, url):
        """Fetch the page at *url* and store its article body in ``self.text``.

        The paragraph slice ``[1:-6]`` drops the first paragraph and the last
        six — presumably site-specific header/footer boilerplate (TODO confirm
        against the target site's layout).
        """
        # timeout keeps a hung server from blocking the request forever
        res = requests.get(url, timeout=30)
        res.encoding = 'utf-8'  # force an encoding that supports Chinese text
        soup = BeautifulSoup(res.text, 'html.parser')  # parse the HTML page
        # Join the stripped paragraph texts into one string.
        self.text = ''.join([news.text.strip() for news in soup.select("p")][1:-6])

    def get_wordcloud(self):
        """Render a word-cloud PNG from ``self.text`` and save it locally.

        Returns:
            str: the saved image path with the ``my_app`` prefix replaced by
            ``..`` (the path form the web templates expect).
        """
        con = jieba.lcut(self.text)  # segment the Chinese text into words
        words = " ".join(con)  # WordCloud expects space-separated tokens
        # The mask image supplies both the cloud's shape and its colours.
        backgroud_Image = plt.imread('my_app/static/image/wordcloud_mark.jpg')
        wc = WordCloud(
            background_color='white',
            mask=backgroud_Image,
            font_path='static/simkai.ttf',  # font able to render CJK glyphs
            max_words=2000,
            max_font_size=150,
            random_state=30,
        )
        wc.generate_from_text(words)
        img_colors = ImageColorGenerator(backgroud_Image)
        wc.recolor(color_func=img_colors)

        # BUG FIX: the original format was '%Y%m%d_%H%M%M' — minutes twice,
        # no seconds — so files generated within the same minute overwrote
        # each other. '%H%M%S' gives a proper hour-minute-second timestamp.
        timestamp = time.strftime('%Y%m%d_%H%M%S', time.localtime())
        img_path = 'my_app/static/image/' + timestamp + '.png'
        wc.to_file(img_path)  # persist the rendered word cloud
        print('生成词云成功!')
        return img_path.replace("my_app", "..")  # path as seen by templates

    def get_summary(self, limit):
        """Return up to *limit* distinct summary sentences of the article.

        Args:
            limit: maximum number of summary sentences to request.

        Returns:
            set[str]: the deduplicated summary sentences.
        """
        s = SnowNLP(self.text)
        return set(s.summary(limit=limit))
