import jieba
import jieba.analyse
import pandas as pd


class KeywordExtractor(object):
    """Extract keywords from Chinese text using jieba's TF-IDF and TextRank.

    Workflow: ``process_text`` segments and cleans raw text, ``get_keyword_list``
    runs one of the two jieba keyword algorithms, and ``keyword_interact`` /
    ``keyword_topk`` combine the two algorithms' outputs.
    """

    def __init__(self):
        """Load the stop-word list from ``config/stop_words.txt`` (one word per line).

        Note: the previous implementation abused ``pd.read_table`` with a bogus
        multi-character separator (``sep='aaa'``) just to read whole lines; a
        plain file read does the same thing without the hack.
        """
        with open("config/stop_words.txt", encoding="utf-8") as fh:
            words = [line.rstrip("\n") for line in fh if line.strip()]
        # Kept as a list for backward compatibility with any external readers.
        self.stop_list = words
        # Set copy for O(1) membership tests in process_text (list lookup is O(n)).
        self._stop_words = set(words)

    def process_text(self, txt):
        """Segment ``txt`` with jieba, drop stop words and single-character
        tokens, and return the surviving tokens joined by single spaces."""
        word_list = [w for w in jieba.lcut(txt)
                     if w not in self._stop_words and len(w) > 1]
        return ' '.join(word_list)

    @staticmethod
    def get_keyword_list(words_list, param, use_pos=True):
        """Return the top-10 keywords of ``words_list``.

        :param words_list: text (space-joined tokens) to analyse.
        :param param: ``'tfidf'`` or ``'textrank'`` — which jieba algorithm to run.
        :param use_pos: restrict results to noun-like POS tags when True.
        :raises ValueError: if ``param`` is neither ``'tfidf'`` nor ``'textrank'``
            (the original silently returned ``None``, hiding caller typos).
        """
        if use_pos:
            # Noun-like POS tags plus verbal nouns ('vn'); duplicate 'nr' removed.
            allow_pos = ('n', 'nr', 'nr1', 'nr2', 'ns', 'nsf', 'nt', 'nz', 'nl', 'ng', 'vn')
        else:
            allow_pos = ()
        if param == 'tfidf':
            return jieba.analyse.extract_tags(words_list, topK=10, withWeight=False, allowPOS=allow_pos)
        if param == 'textrank':
            return jieba.analyse.textrank(words_list, topK=10, withWeight=False, allowPOS=allow_pos)
        raise ValueError("param must be 'tfidf' or 'textrank', got %r" % (param,))

    @staticmethod
    def keyword_interact(tfidf_keyword, textrank_keyword):
        """Return the intersection of both algorithms' keyword lists
        (element order is unspecified)."""
        return list(set(tfidf_keyword).intersection(textrank_keyword))

    @staticmethod
    def keyword_topk(tfidf_keyword, textrank_keyword, k):
        """Return the union of the top-``k`` keywords from each algorithm.

        Bug fix: the original ignored ``k`` entirely and returned the union of
        the complete lists. Element order is unspecified (set-based).
        """
        combine = list(tfidf_keyword)[:k] + list(textrank_keyword)[:k]
        return list(set(combine))
