import os
import re
from collections import Counter

import jieba
from jieba import analyse

from utils.common.logger import log

# Stop-words file shipped alongside this module (data/stopwords.txt).
# os.path.join is portable and avoids manual separator concatenation.
stop_words_path = os.path.join(os.path.dirname(__file__), "data", "stopwords.txt")


class WordAnalyzer:
    """
    Text analysis helpers: word segmentation, keyword extraction and
    hot-word (trending word) statistics.

    Attributes:
        stopwords_list: list of stop words loaded from the stop-words file.
    """

    # Matches every run of characters that is NOT a digit, ASCII letter or
    # CJK ideograph (U+4E00..U+9FA5); used to strip punctuation/symbols
    # before segmentation. Compiled once instead of per seg() call.
    _NON_WORD_RE = re.compile(r"[^0-9a-zA-Z\u4e00-\u9fa5]+")

    def __init__(self):
        # Context manager guarantees the file handle is closed (the
        # original open(...).readlines() leaked the handle).
        with open(stop_words_path, encoding='utf8') as f:
            self.stopwords_list = [line.strip() for line in f]
        # Set copy for O(1) membership tests in seg(); the public list
        # attribute is kept unchanged for backward compatibility.
        self._stopwords = set(self.stopwords_list)
        analyse.set_stop_words(stop_words_path)
        log.logger.info("WordAnalyzer initialized...")

    def seg(self, text: str):
        """Segment the given text and return the list of tokens.

        Non-word characters (anything but digits, ASCII letters and CJK
        ideographs) are replaced by spaces first; whitespace-only tokens
        and stop words are filtered out.
        """
        text = self._NON_WORD_RE.sub(" ", text)
        return [
            word for word in jieba.cut(text)
            if word.strip() and word not in self._stopwords
        ]

    def tag_news(self, inData: list):
        """
        Tag a news dataset (a list of JSON-like dicts) in place and return it.

        For each item, the top-5 TF-IDF keywords of ``item["content"]`` are
        joined with "/" and stored under ``item["keywords"]`` (an empty
        string when the content is None).
        """
        for idx, obj in enumerate(inData):
            # Progress log every 1000 items.
            if (idx + 1) % 1000 == 0:
                log.logger.info("{} news has been tagged".format(idx + 1))

            text = obj["content"]
            if text is not None:
                keywords = analyse.extract_tags(text, topK=5)
                obj["keywords"] = "/".join(keywords)
            else:
                obj["keywords"] = ""

        return inData

    def get_hotwords(self, data: list):
        """
        Compute hot words from a list of dicts carrying "/"-joined keywords.

        Returns:
            The 100 most frequent keywords as a list of (word, count)
            tuples, sorted by count in descending order (ties keep first-
            occurrence order, matching the previous stable sort).
        """
        # Counter.most_common(k) is O(n log k); the previous
        # words.count(word)-per-word approach was O(n^2).
        counts = Counter(
            word for obj in data for word in obj["keywords"].split("/"))
        return counts.most_common(100)
