import numpy as np
import re,jieba
from itertools import chain
import openfile
if __name__ == "__main__":
    # Load the raw sentence list from the article file via the project helper.
    sentences_list = openfile.openfile('data/吴邦国重申：中国坚持和平发展道路不会因国力地位变化而改变_共产党员网.txt')

    # Load stopwords into a set: O(1) membership tests in the filter loop
    # (a list was O(n) per token), and a context manager so the file handle
    # is closed deterministically.
    with open('data/stopwords.txt', encoding='utf-8') as f:
        stopwords = {word.strip() for word in f}
    # print(stopwords)

    def seg_depart(sentence):
        """Segment one sentence with jieba and drop stopwords.

        Non-CJK characters are removed first, so a sentence such as
        '02-2717:56' becomes empty and yields [] — keeping the number of
        output entries equal to the number of input sentences, which later
        lets TextRank scores map back to the original sentences.
        """
        # Keep only Chinese (CJK Unified Ideographs) characters.
        sentence = re.sub(r'[^\u4e00-\u9fa5]+', '', sentence)
        return [word for word in jieba.cut(sentence.strip())
                if word not in stopwords]

    # Segment every sentence; indices stay aligned with sentences_list.
    sentence_word_list = [seg_depart(sentence) for sentence in sentences_list]
    print("一共有", len(sentences_list), '个句子。\n')
    print("前10个句子分词后的结果为：\n", sentence_word_list[:10])

    # Sentence count must be unchanged so we can later select the original
    # (unprocessed) sentences by their TextRank scores as the summary.
    if len(sentences_list) == len(sentence_word_list):
        print("\n数据预处理后句子的数量不变！")
