import os
import jieba
import re
from sklearn.feature_extraction.text import CountVectorizer
from collections import defaultdict
from sklearn.feature_extraction.text import CountVectorizer
import numpy as np
from nltk.corpus import wordnet
import copy
import pickle
import json
# Select keywords from document titles

def key_words_title():
    """Extract the set of unique title keywords from all pickled documents.

    Reads every pickle file in the 'data_clean1' directory, segments each
    document's "title" field with jieba's search-mode tokenizer, filters out
    whitespace/punctuation stopwords, prints the number of distinct keywords,
    and dumps the deduplicated keyword list (pickle) to 'key_words_title'.

    Returns:
        None. Side effects: prints a count and writes the 'key_words_title' file.
    """
    path = 'data_clean1'
    files = os.listdir(path)

    # Tokens that carry no keyword information (whitespace + punctuation).
    # A set gives O(1) membership tests (original used a list: O(n) per token).
    stopwords = {"\n", "\r", "\t", "\r\n", "\n\r", ",", ' ', '【', '’】', "：", "，"}

    raw = []  # all title tokens, before stopword filtering
    for name in files:
        # os.path.join is portable (original hard-coded a Windows '\\' join);
        # `with` guarantees the handle is closed (original leaked every handle).
        with open(os.path.join(path, name), 'rb') as f:
            data = pickle.load(f, encoding='bytes')
        raw += list(jieba.cut_for_search(data["title"]))

    # Deduplicate while dropping stopwords in a single pass.
    result = list({w for w in raw if w not in stopwords})
    print(len(result))

    # Persist the keyword list for downstream steps.
    with open("key_words_title", 'wb') as f:
        pickle.dump(result, f)
# Guard the script entry point so importing this module does not trigger a run.
if __name__ == "__main__":
    key_words_title()