# -*- coding:utf-8 -*-
import jieba

common_words = ["怎么样"]
my_dict_words = ["北京", "天气"]


def load_user_dict(path):
    """Load a user dictionary from *path* (UTF-8, one word per line).

    Words of length <= 2 and words listed in ``common_words`` are
    skipped as noise; the words in ``my_dict_words`` are always added
    afterwards regardless of length.

    :param path: path to a UTF-8 text file with one word per line
    :return: set of dictionary words
    """
    user_dict = set()
    # Hoist the skip-list into a set once; membership tests are then O(1).
    skip = set(common_words)
    with open(path, 'r', encoding='utf-8') as fr:
        for line in fr:
            word = line.rstrip()  # strip the newline (and trailing spaces) once
            if len(word) <= 2 or word in skip:
                continue
            user_dict.add(word)
    user_dict.update(my_dict_words)
    return user_dict


class Tokenizer(object):
    """Wraps jieba segmentation plus a simple dictionary-based NER."""

    def __init__(self, user_dict_path=None):
        """
        :param user_dict_path: optional path to a user-dictionary file
            (one word per line); when None the dictionary is empty.
        """
        # Use a set in both branches so membership tests in ner() are O(1)
        # and the attribute has a consistent type.
        if user_dict_path is not None:
            self._user_dict = load_user_dict(user_dict_path)
        else:
            self._user_dict = set()

    @property
    def user_dict(self):
        """The loaded user-dictionary words (a set of strings)."""
        return self._user_dict

    @staticmethod
    def tokenize(sentence):
        """Tokenize *sentence* with jieba, merging runs of consecutive
        non-Chinese, non-punctuation tokens into single tokens
        (e.g. the pieces of "#520#" become one token).

        :param sentence: input text (lower-cased before cutting)
        :return: list of token strings
        """
        words = list(jieba.cut(sentence.lower()))
        ret_words = []
        special_words = []  # buffer of consecutive non-Chinese tokens
        for word in words:
            if word in [" ", ""]:
                continue
            # Punctuation (half- and full-width pairs) breaks a special run.
            # NOTE: the original list held ASCII '?' twice; the second entry
            # was clearly meant to be the full-width '？' (fixed here).
            if not is_chinese(word) and word not in ['!', '！', '#', ',', '，', '.', '。', '?', '？']:
                special_words += [word]
                continue
            # A Chinese/punctuation token ends the buffered run: flush it.
            if len(special_words) > 0:
                ret_words += ["".join(special_words)]
                special_words = []

            ret_words += [word]

        # Flush any trailing buffered non-Chinese run.
        if len(special_words) > 0:
            ret_words += ["".join(special_words)]

        return ret_words

    def ner(self, query):
        """Greedy longest-match lookup of user-dictionary words in *query*.

        Scans left to right; at each start position the longest substring
        (length >= 2) is tried first, and the first match that is both in
        the dictionary and Chinese is emitted; the scan then resumes after
        the matched span.

        :param query: input string
        :return: list of matched dictionary words, in order of appearance
        """
        m = len(query)
        ner_words = []
        i = 0
        while i < m:
            j = m - 1
            while j > i:
                wij = query[i:j + 1]
                if wij in self.user_dict and is_chinese(wij):
                    ner_words += [wij]
                    break
                j -= 1
            i = j + 1
        return ner_words


def is_chinese(word):
    """Return True if *word* contains at least one CJK unified ideograph
    (code points U+4E00 through U+9FFF)."""
    return any('\u4e00' <= ch <= '\u9fff' for ch in word)


if __name__ == '__main__':
    # Smoke test: load the user dictionary, then run NER and tokenization
    # on a sample sentence and print both results.
    tokenizer = Tokenizer(user_dict_path="../data/baike.word.clean.txt")
    sent = u"#520#每天都要开心加藤鹰的西域男孩过北京天气噢怎么样！	什么东东。！怎么做的？"
    ner_result = tokenizer.ner(sent)
    token_result = tokenizer.tokenize(sent)
    print(", ".join(ner_result))
    print(", ".join(token_result))
