"""
分词
"""
import string

import jieba
import jieba.posseg as psg
import chatbot.config as config
from .stopwords import stopwords

import logging

jieba.setLogLevel(logging.INFO)

jieba.load_userdict(config.user_dict_path)
letters = string.ascii_lowercase + "+"  # treat '+' as a lowercase letter so tokens like "c++" stay in one piece


def cut_sentence_by_word(sentence):
    """Split a mixed Chinese/English sentence into character-level tokens.

    Consecutive ASCII letters (plus ``+``, so e.g. "c++" stays whole) are
    grouped into a single lowercase token; every other character — Chinese
    characters, digits, punctuation, whitespace — becomes its own token.

    :param sentence: the input text
    :return: list of tokens
    """
    ascii_run = string.ascii_lowercase + "+"
    tokens = []
    buffer = []  # pending run of ASCII letters not yet flushed
    for ch in sentence.lower():
        if ch in ascii_run:
            buffer.append(ch)
            continue
        # Non-ASCII char ends any pending English run, then stands alone.
        if buffer:
            tokens.append("".join(buffer))
            buffer.clear()
        tokens.append(ch)
    if buffer:
        tokens.append("".join(buffer))
    return tokens


def cut(sentence, by_word=False, use_stopword=False, with_sg=False):
    """Tokenize a sentence.

    :param sentence: the input text
    :param by_word: split into single characters (runs of English letters
        are kept together); POS tags are unavailable in this mode, so
        ``with_sg`` is ignored when ``by_word`` is True
    :param use_stopword: drop tokens found in the stopword list
    :param with_sg: also return each token's POS flag as (word, flag) pairs
    :return: list of tokens, or list of (word, flag) tuples when ``with_sg``
    """
    if by_word:
        # Character-level tokens carry no POS information. Previously,
        # honoring with_sg here indexed into plain strings, raising
        # IndexError on single-character tokens (i.e. every Chinese char)
        # and corrupting longer tokens into (char, char) pairs.
        with_sg = False
        result = cut_sentence_by_word(sentence)
    else:
        pairs = psg.lcut(sentence)
        result = [(pair.word, pair.flag) for pair in pairs]
        if not with_sg:
            result = [word for word, _ in result]
    if use_stopword:
        if with_sg:
            result = [pair for pair in result if pair[0] not in stopwords]
        else:
            result = [token for token in result if token not in stopwords]
    # Trim surrounding whitespace and drop whitespace-only tokens.
    if with_sg:
        return [(word.strip(), flag) for word, flag in result if word.strip()]
    return [token.strip() for token in result if token.strip()]
