# Jieba-based word segmentation class

import re
from functools import partial
from pathlib import Path

import jieba
import jieba.analyse


class CutWord:
    """Chinese word segmentation helper built on jieba.

    Optionally loads, from UTF-8 text files:
      * stop words (one word per line) to drop from the output,
      * a jieba user dictionary to improve segmentation,
      * a thesaurus ("canonical,syn1,syn2,..." per line) mapping each
        synonym to the canonical first word of its line.
    """

    def __init__(self,
                 stop_words_path=None,
                 userdict_path=None,
                 thesaurus_path=None):
        """
        Args:
            stop_words_path: optional path to a stop-word file, one per line.
            userdict_path: optional path to a jieba user dictionary file.
            thesaurus_path: optional path to a thesaurus file; each line is
                "canonical,synonym1,synonym2,...".
        """
        # Stop words: always a set (the original fell back to a dict literal).
        self.stop_words = set()
        if stop_words_path:
            # 'with' closes the file; the original leaked the handle.
            with Path(stop_words_path).open(encoding='utf-8') as f:
                self.stop_words = {line.strip() for line in f if line.strip()}

        # Extend jieba's dictionary when a user dictionary is supplied.
        if userdict_path:
            jieba.load_userdict(userdict_path)

        # Thesaurus: map each synonym to the first (canonical) word of its
        # line.  Each field is stripped — the original kept the trailing
        # newline on the last synonym of every line, so those entries could
        # never match a token in cut().
        self.thesaurus = {}
        if thesaurus_path:
            with Path(thesaurus_path).open(encoding='utf-8') as f:
                for line in f:
                    words = [w.strip() for w in line.split(',')]
                    if not words or not words[0]:
                        continue  # skip blank / malformed lines
                    for synonym in words[1:]:
                        if synonym:
                            self.thesaurus[synonym] = words[0]

        # Matches tokens that start with a CJK unified ideograph.
        self.cn_re = re.compile(r'[\u4e00-\u9fa5]+')

    def cut(self, sentence, less_length=2, mode='', *args, **kwargs):
        """Segment *sentence* and yield filtered, normalized tokens.

        Args:
            sentence: text to segment.
            less_length: drop tokens shorter than this many characters.
            mode: '' for plain ``jieba.cut``; 'textrank' or 'tfidf' for
                keyword extraction via ``jieba.analyse``.
            *args, **kwargs: forwarded to the underlying jieba function,
                e.g. topK=20, withWeight=False, allowPOS=(...), withFlag=False.

        Yields:
            Tokens that start with a Chinese character, are at least
            ``less_length`` characters long and are not stop words, with
            synonyms replaced by their canonical thesaurus entry.
        """
        if mode == 'textrank':
            tokenize = partial(jieba.analyse.textrank, *args, **kwargs)
        elif mode == 'tfidf':
            tokenize = partial(jieba.analyse.tfidf, *args, **kwargs)
        else:
            tokenize = partial(jieba.cut, *args, **kwargs)
        for word in tokenize(sentence):
            # Length filter and "starts with Chinese" filter.
            if len(word) < less_length or not self.cn_re.match(word):
                continue
            if word in self.stop_words:
                continue
            # Normalize synonyms to their canonical form.
            yield self.thesaurus.get(word, word)
