import json
import time

import jieba
import nltk
import zhon.hanzi
from nltk.corpus import stopwords

# Chinese punctuation characters (from zhon); currently unused in this module.
punc = zhon.hanzi.punctuation

# Chinese stopword list from NLTK.
# NOTE(review): requires the NLTK "stopwords" corpus to be downloaded — confirm.
stopwords_cn = stopwords.words("chinese")

# Load the project path configuration from the shared JSON file.
with open("../file_location.json", 'r', encoding="utf-8") as load_file:
    CONFIG = json.load(load_file)

# Resolve the data directories relative to the configured root.
data_location = CONFIG["location"]["data_root"]
data_relate = f'{data_location}{CONFIG["location"]["data_relate"]}'
data_apart = f'{data_location}{CONFIG["location"]["data_apart"]}'
data_statistic = f'{data_location}{CONFIG["location"]["data_statistic"]}'

# Input / intermediate / output files for the "中南大学" corpus.
word_relate = f'{data_relate}中南大学.txt'
word_apart = f'{data_apart}中南大学.txt'
word_static = f'{data_statistic}中南大学.txt'

def statistic(word_input, word_apart_file, word_statistics):
    """Segment a Chinese text file and write word-frequency statistics.

    Reads the whole text from *word_input*, segments it with jieba,
    appends the space-joined tokens to *word_apart_file*, then writes a
    frequency table (one ``word count`` pair per line, most frequent
    first) to *word_statistics*, overwriting any previous contents.

    Tokens of length <= 1 and words in the module-level ``stopwords_cn``
    list are excluded from the frequency table.

    :param word_input: path of the UTF-8 text file to segment
    :param word_apart_file: path of the segmentation output (append mode)
    :param word_statistics: path of the frequency-table output (overwritten)
    """
    with open(word_input, encoding="utf-8") as word_input_file:
        text = word_input_file.read()

    word_apart_outcome = jieba.lcut(text)

    # Context manager guarantees the handle is closed even if the write
    # fails (the original opened this file without `with` and leaked it
    # on error). Append mode preserved from the original behavior.
    with open(word_apart_file, 'a', encoding="utf-8") as result:
        result.write(' '.join(word_apart_outcome))

    # Count token frequencies. The `len(word) > 1` filter also discards
    # single-character whitespace tokens such as '\n' and '\t', so no
    # separate checks are needed.
    counts = {}
    for word in word_apart_outcome:
        if len(word) > 1:
            counts[word] = counts.get(word, 0) + 1

    # Drop stopwords from the tally (missing keys are ignored).
    for word in stopwords_cn:
        counts.pop(word, None)

    # Sort by descending frequency and write one "word count" per line.
    ls_sorted = sorted(counts.items(), key=lambda x: x[1], reverse=True)
    with open(word_statistics, 'w', encoding="utf-8") as statistic_result:
        statistic_result.write('\n'.join('%s %s' % x for x in ls_sorted))


if __name__ == "__main__":
    # Run the segmentation + frequency pipeline on the configured files.
    statistic(word_relate, word_apart, word_static)
