"""
分词
"""
import logging
import jieba
import jieba.posseg as psg
import config
import re
import string
from lib import stopwords

# Silence jieba's verbose (DEBUG-level) log output.
jieba.setLogLevel(logging.INFO)
# Load the user-defined dictionary.
jieba.load_userdict(config.user_dict_path)
# Per-character splitting: alphabet used to detect English letters.
letters = string.ascii_lowercase
# Per-character splitting: punctuation characters to drop.
filters = [",", "-", ".", " "]


def cut(sentence, by_word=False, use_stopwords=False, with_sg=False):
    """Tokenize a sentence.

    Args:
        sentence: the text to tokenize.
        by_word: if True, split into single characters, keeping runs of
            ASCII letters together (see ``cut_sentence_by_word``).
            NOTE: stopword filtering is not applied in this mode
            (pre-existing behavior, kept for compatibility).
        use_stopwords: if True, drop tokens found in the stopwords set.
        with_sg: if True, return ``(word, flag)`` tuples including the
            part-of-speech tag; otherwise return plain word strings.

    Returns:
        list[str] if ``with_sg`` is False, else list[tuple[str, str]].

    Raises:
        ValueError: if both ``by_word`` and ``with_sg`` are requested —
            character-level splitting cannot produce POS tags.
    """
    if by_word and with_sg:
        # Was an `assert`, which is stripped under `python -O`; raise instead.
        raise ValueError("根据word切分时候无法返回词性")
    if by_word:
        return cut_sentence_by_word(sentence)
    # Normalize jieba's pair objects to plain tuples immediately, so the
    # return type for with_sg=True is consistent regardless of use_stopwords.
    pairs = [(p.word, p.flag) for p in psg.lcut(sentence)]
    if use_stopwords:
        pairs = [(word, flag) for word, flag in pairs if word not in stopwords]
    if with_sg:
        return pairs
    # BUG FIX: the original did `[i.word for i in ret]` here, which raised
    # AttributeError when use_stopwords had already converted pairs to tuples.
    return [word for word, _ in pairs]


def cut_sentence_by_word(sentence):
    """Split a sentence into single characters, keeping runs of ASCII
    letters (e.g. English words) together as one lowercased token.

    Punctuation in ``{",", "-", ".", " "}`` is dropped; every other
    non-letter character becomes its own token.

    Args:
        sentence: the text to split.

    Returns:
        list[str]: tokens in original order (possibly empty).
    """
    # Local copies of the module-level `letters` / `filters` constants so the
    # function is self-contained; a set gives O(1) membership tests.
    ascii_lower = string.ascii_lowercase
    drop_chars = {",", "-", ".", " "}
    # Collapse whitespace runs to one space (raw string avoids the
    # invalid-escape DeprecationWarning of "\s+").
    sentence = re.sub(r"\s+", " ", sentence).strip()
    result = []
    run = ""  # accumulates the current run of ASCII letters
    for ch in sentence:
        low = ch.lower()
        if low in ascii_lower:
            run += low
            continue
        if run:  # a non-letter character ends the English run
            result.append(run)
            run = ""
        # BUG FIX: the original tested `ch.strip() in filters`, but
        # " ".strip() == "" is not in filters, so spaces leaked into the
        # output even though " " is listed as a filtered character.
        # Test the raw character instead.
        if ch not in drop_chars:
            result.append(ch)
    if run:  # flush a trailing English run
        result.append(run)
    return result


if __name__ == '__main__':
    # Quick manual smoke test for character-level splitting.
    sample = "python和c++哪个难？"
    tokens = cut_sentence_by_word(sample)
    print(tokens)
