from jieba import load_userdict, cut
import re


# 分句
def cut_sent(para):
    para = re.sub('([。！？\?])([^”’])', r"\1\n\2", para)  # 单字符断句符
    para = re.sub('(\.{6})([^”’])', r"\1\n\2", para)  # 英文省略号
    para = re.sub('(\…{2})([^”’])', r"\1\n\2", para)  # 中文省略号
    para = re.sub('([。！？\?][”’])([^，。！？\?])', r'\1\n\2', para)
    # 如果双引号前有终止符，那么双引号才是句子的终点，把分句符\n放到双引号后，注意前面的几句都小心保留了双引号
    para = para.rstrip()  # 段尾如果有多余的\n就去掉它
    # 很多规则中会考虑分号;，但是这里我把它忽略不计，破折号、英文双引号等同样忽略，需要的再做些简单调整即可。
    return para.split("\n")


# Word segmentation
def cutWord(paraList, path=''):
    """Tokenize each sentence with jieba.

    Args:
        paraList: iterable of sentence strings.
        path: optional user-dictionary path; loaded via jieba.load_userdict
            when non-empty. Defaults to '' (no dictionary), which keeps the
            original call signature backward compatible.

    Returns:
        list[list[str]]: one token list per input sentence.
    """
    # Truthiness test also guards against None, not just the empty string.
    if path:
        load_userdict(path)
    # list(cut(...)) replaces the manual append loop (PERF401/402).
    return [list(cut(sentence)) for sentence in paraList]


# Stop-word removal
def remStopWord(sentenceList, stopWordPath='停用词.txt'):
    """Remove stop words from a token list, then re-tokenize the result.

    Args:
        sentenceList: iterable of tokens (strings) to filter.
        stopWordPath: path to the UTF-8 stop-word file; defaults to the
            original hard-coded '停用词.txt' for backward compatibility.

    Returns:
        list[str]: jieba tokens of the concatenated, filtered text.

    NOTE(review): concatenating the surviving tokens and re-cutting may
    merge adjacent tokens into different words — this reproduces the
    original behavior deliberately.
    """
    with open(stopWordPath, mode='r', encoding='utf-8') as file:
        # A set gives O(1) membership tests instead of O(n) per token.
        stopWords = set(cut(file.read()))
    # ''.join avoids the quadratic += string concatenation of the original.
    filtered = ''.join(word for word in sentenceList if word not in stopWords)
    return list(cut(filtered))


# List flattening (one level)
def decList(nestedList):
    """Flatten a list of lists by one level.

    Args:
        nestedList: iterable whose elements are themselves iterables
            (the original usage passes a list of token lists).

    Returns:
        list: all inner elements concatenated in order.
    """
    # Parameter renamed from 'list' to avoid shadowing the builtin;
    # a flattening comprehension replaces the manual += loop.
    return [item for sub in nestedList for item in sub]