import zhon.hanzi
from nltk.corpus import stopwords

from algorithm import search_seedword_relation_and_statistics
from dataProcess import Read_data
import time
import jieba.posseg as posseg

# Chinese punctuation characters (from the zhon package)
punc = zhon.hanzi.punctuation
# Chinese stop words (NLTK corpus; requires the NLTK stopwords data to be downloaded)
stopwords_cn = stopwords.words("chinese")

# Module-level side effect: preprocess the corpus once at import time.
# NOTE(review): presumably the cleaned e-commerce text used downstream — confirm against Read_data.
data_after_process = Read_data.process_data()


# print(data_after_process)
# word_apart_outcome = posseg.cut(data_after_process)

def create_word(path="D:/data/E-commerce/wordtocreate.txt"):
    """Run relation/statistics search for every seed word listed in *path*.

    Each line of the file is expected to start with a seed word, optionally
    followed by space-separated extra fields, which are ignored.

    Args:
        path: UTF-8 text file of seed words, one per line. Defaults to the
            original hard-coded location for backward compatibility.
    """
    # `with` guarantees the handle is closed even if the search raises
    # (the original opened the file and never closed it).
    with open(path, 'r', encoding="UTF-8") as seed_file:
        # Iterate lazily instead of loading the whole file via readlines().
        for line in seed_file:
            # Take the first space-separated token; strip() removes the
            # trailing newline the original kept when a line had no space.
            word = line.split(' ')[0].strip()
            if not word:
                # Skip blank lines rather than querying an empty word.
                continue
            print(word)
            search_seedword_relation_and_statistics(word)


# Script entry point: process the default seed-word file when run directly.
if __name__ == '__main__':
    create_word()
