import jieba as jb
import re

# Read stop words from the given file path and return them as a list.
def stopwordslist(filepath):
    """Load stop words (one per line) from *filepath*.

    Args:
        filepath: Path to a UTF-8 text file containing one stop word per line.

    Returns:
        A list of stripped stop-word strings, in file order.
    """
    # Use a context manager so the file handle is closed deterministically
    # (the original relied on garbage collection to close it). Iterating the
    # file object directly also avoids the redundant readlines() list.
    with open(filepath, 'r', encoding='utf-8') as f:
        return [line.strip() for line in f]

# Load the user dictionary so domain-specific terms are kept as single tokens
# during segmentation (path is hard-coded to the project directory).
jb.load_userdict("D:\\pycharmproject\\高校图书采购主题分析\\user_dict.txt")


# Segment a sentence into words, dropping digits, stop words, and
# single-character tokens.
def seg_sentence(sentence):
    """Tokenise *sentence* with jieba and filter the tokens.

    Digits and decimal points are removed first; then tokens that are
    stop words or shorter than two characters are discarded.

    Args:
        sentence: Raw input sentence (str).

    Returns:
        A single string of the kept tokens, each followed by one space
        (empty string when nothing survives the filters).
    """
    # Remove digits and decimal points only. (A broader [^\d\W] pattern was
    # tried previously but kept only digits, per the original author's note.)
    sentence = re.sub(r'[0-9.]+', '', sentence)

    tokens = jb.cut(sentence.strip())
    # Membership is tested once per token, so convert the stop-word list to a
    # set for O(1) lookups instead of an O(n) list scan.
    stopwords = set(stopwordslist('D:\\pycharmproject\\高校图书采购主题分析\\stopwords.txt'))
    # len(word) is the idiomatic spelling of word.__len__(). The original's
    # extra `word != '\t'` check was redundant: a lone tab has length 1 and is
    # already excluded by len(word) > 1.
    kept = [word for word in tokens
            if word not in stopwords and len(word) > 1]
    # join avoids quadratic += concatenation; each token keeps its trailing
    # space exactly as the original produced.
    return ''.join(word + ' ' for word in kept)

#E:\allme\learning\reproducing-the-code-from-a-paper\rcp0810-for-CNIK-Data
# Open modes: 'r' read-only, 'w' write (truncate), 'a' append.
# Context managers guarantee both files are closed (and the output flushed)
# even if seg_sentence raises mid-loop; the original closed them manually.
with open("D:\\pycharmproject\\高校图书采购主题分析\\CNKI.txt", "r", encoding='utf-8') as inputs, \
     open("D:\\pycharmproject\\高校图书采购主题分析\\CNKI-output.txt", "w", encoding='utf-8') as outputs:
    for line in inputs:
        line_seg = seg_sentence(line)  # returns a space-joined token string
        outputs.write(line_seg + '\n')