import pickle
import os
import jieba
import re
path = 'data1'  # input dir: raw pickled documents (dicts with url/title/time/context)
path_clean = 'data_clean_key_cut'  # output dir: tokenized + stopword-filtered documents
path_full="data_full1_keywords"  # output dir: cleaned full text (not tokenized context)
files = os.listdir(path)  # NOTE: runs at import time; assumes `path` exists

# Data cleaning (数据清洗)

def fenci():
    """Clean, segment and re-pickle every document in the `path` directory.

    For each pickled file (a dict with at least 'url', 'title', 'time',
    'context') this routine:
      1. strips ASCII letters/digits and common punctuation from the body,
      2. drops blank lines,
      3. segments body and title with jieba search-mode segmentation,
      4. up-weights the title by appending each title token 5 times to the
         body token list,
      5. removes stopwords,
      6. writes the tokenized result to `path_clean` and the cleaned full
         text to `path_full` (same filename, pickled dicts).

    Reads module globals: path, path_clean, path_full, files.
    """
    # Characters to delete from the raw text: ASCII letters, digits and
    # punctuation. Same character set as the original pattern, with the
    # duplicated ']' '[' '|' entries removed; compiled once, outside the loop.
    noise_re = re.compile(r'[A-Za-z0-9!%\[\]_/<>="\.;*:#\-()|{}+&?]')

    # Load the stopword table once (it is loop-invariant; the original
    # re-read the file for every document). 'ANSI' is a Windows-only
    # codec alias — NOTE(review): confirm target platform.
    with open("stop_words_ch-停用词表.txt", 'r', encoding="ANSI") as f_stop:
        stopword_list = [word.strip('\n') for word in f_stop.readlines()]
    stopword_list.extend(["\n", "\r", "\t", "\r\n", "\n\r", ",", ' ', '【', '’】'])
    stopwords = set(stopword_list)  # O(1) membership instead of list scans

    for fname in files:  # renamed from `i`: it was shadowed by an inner loop
        with open(os.path.join(path, fname), 'rb') as f:
            # NOTE(review): pickle.load is unsafe on untrusted input —
            # acceptable only because these files are produced locally.
            data = pickle.load(f, encoding='bytes')

        text = data["context"]
        title = data["title"]
        text = noise_re.sub("", text)
        # Remove blank lines while preserving the remaining line breaks.
        text = "".join(s for s in text.strip().splitlines(True) if s.strip())

        seg_list_context = list(jieba.cut_for_search(text))   # body tokens
        seg_list_title = list(jieba.cut_for_search(title))    # title tokens

        # Title weighting: every title token counts 5x in the context list.
        for word in seg_list_title:
            seg_list_context.extend([word] * 5)

        # Stopword filtering.
        result = [w for w in seg_list_context if w not in stopwords]

        result_dict = dict([('url', data['url']), ('title', data['title']),
                            ('time', data['time']), ('context', result),
                            ('title_cut', seg_list_title)])
        print(result_dict)
        # Bug fix: the original opened this file in "wb" (truncating it) but
        # the dump was commented out, so the cleaned output was destroyed
        # and never written.
        with open(os.path.join(path_clean, fname), "wb") as f1:
            pickle.dump(result_dict, f1)  # tokenized, stopword-filtered result

        full_dict = dict([('url', data['url']), ('title', data['title']),
                          ('time', data['time']), ('context', text),
                          ('title_cut', seg_list_title)])
        with open(os.path.join(path_full, fname), "wb") as f2:
            pickle.dump(full_dict, f2)  # cleaned full text of the article
        print(full_dict)
# Guard the batch run so importing this module does not trigger it.
if __name__ == "__main__":
    fenci()