import jieba

# Path to the Chinese stopword list (UTF-8, one stopword per line).
# NOTE(review): identifier is misspelled ("stopswords") but is read by
# stopwords() below — renaming would break that call site.
stopswords_path = r'D:\data\sohu\stopwords\中文停用词表.txt'


def segment(sentence):
    """Tokenize ``sentence`` with jieba and remove stopwords.

    :param sentence: a ``str``, or a ``list`` whose FIRST element is the
        string to segment (only ``sentence[0]`` is used — this matches the
        original behavior; remaining elements are ignored).
    :return: list of tokens with stopwords removed; an empty list for an
        empty input list; ``None`` when ``sentence`` is neither a ``str``
        nor a ``list`` (implicit fall-through, kept for compatibility).
    """
    if isinstance(sentence, list):
        # Guard: the original code raised IndexError on an empty list.
        if not sentence:
            return []
        return stopwords(list(jieba.cut(sentence[0])))
    if isinstance(sentence, str):
        return stopwords(list(jieba.cut(sentence)))


def stopwords(words, path=None):
    """Filter stopwords out of ``words``.

    Bug fixed: the original appended each file line WITH its trailing
    newline, so ``'的' in sws`` compared against ``'的\\n'`` and almost no
    token was ever filtered.  Lines are now stripped of line endings only
    (``rstrip('\\r\\n')``, not ``strip()``, so a lone-space stopword — common
    in Chinese lists — survives).

    The parsed stopword set is cached per path on the function object, so
    the file is read once instead of on every call, and membership tests
    are O(1) via a set instead of O(n) via a list.

    :param words: iterable of tokens to filter
    :param path: optional stopword-file path; defaults to the module-level
        ``stopswords_path`` (backward compatible).
    :return: list of tokens that are not stopwords
    """
    if path is None:
        path = stopswords_path
    cache = getattr(stopwords, '_cache', None)
    if cache is None:
        cache = stopwords._cache = {}
    sws = cache.get(path)
    if sws is None:
        with open(path, mode='r', encoding='utf-8') as f:
            # Strip only line terminators; keep any other whitespace that
            # is itself a stopword entry.
            sws = {line.rstrip('\r\n') for line in f}
        cache[path] = sws
    return [word for word in words if word not in sws]


def cut_sentence(document, num):
    """Split ``document`` into sentences and normalize to exactly ``num``.

    The document is split on the Chinese full stop ``'。'`` (the original
    docstring incorrectly said "comma").  Shorter results are padded with
    ``"<pad>"``; longer ones are truncated.

    :param document: text to split
    :param num: exact number of sentences to return
    :return: list of ``num`` single-element lists,
        e.g. ``[['s1'], ['s2'], ['<pad>']]``
    """
    sentences = document.split('。')
    if len(sentences) >= num:
        sentences = sentences[:num]
    else:
        sentences.extend(["<pad>"] * (num - len(sentences)))
    # Wrap each sentence in its own list to produce a 2-D result.
    return [[s] for s in sentences]


def cut_paragraph(document, p_num, s_num):
    """Split ``document`` into paragraphs, then sentences, with padding.

    Paragraphs are split on a SINGLE space (the original docstring said
    "double space" — NOTE(review): confirm which separator is intended),
    padded/truncated to ``p_num``, then each paragraph is passed through
    ``cut_sentence`` to get ``s_num`` sentences.

    NOTE(review): paragraph padding uses ``'pad'`` while ``cut_sentence``
    pads with ``'<pad>'``; kept as-is because the original docstring
    example shows exactly this mixed output.

    :param document: text to split
    :param p_num: exact number of paragraphs to return
    :param s_num: sentences per paragraph (see ``cut_sentence``)
    :return: 3-D list with shape ``[p_num][s_num][1]``
    """
    paragraphs = document.split(' ')
    if len(paragraphs) >= p_num:
        paragraphs = paragraphs[:p_num]
    else:
        paragraphs.extend(['pad'] * (p_num - len(paragraphs)))
    return [cut_sentence(p, s_num) for p in paragraphs]
