import json
import time
import jieba
import nltk
import zhon.hanzi
from nltk.corpus import stopwords
from snownlp import SnowNLP
import thulac

# Punctuation characters (currently unused; kept for reference)
# punc = zhon.hanzi.punctuation
# # Chinese stop-word list (currently unused; kept for reference)
#
# stopwords_cn = stopwords.words("chinese")

# Load the path configuration JSON that lives one directory above this script.
with open("../file_location.json", 'r', encoding="utf-8") as cfg_file:
    CONFIG = json.load(cfg_file)

# Resolve the data directories declared under the "location" section.
_location = CONFIG["location"]
data_location = _location["data_root"]
data_relate = f"{data_location}{_location['data_relate']}"
data_apart = f"{data_location}{_location['data_apart']}"
data_statistic = f"{data_location}{_location['data_statistic']}"

# Input corpus and one output file per segmentation tool.
word_relate = f"{data_location}tempSearchData.txt"
word_apart_snowNLP = f"{data_apart}snowNLP.txt"
word_apart_jieba = f"{data_apart}jieba.txt"
word_apart_thulac = f"{data_apart}thulac.txt"

word_static = f"{data_statistic}中南大学.txt"


def statistic_snowNLP(word_input, word_apart_file, word_statistics):
    """Segment the text in ``word_input`` with SnowNLP, append the token
    list and the elapsed time to ``word_apart_file``, and return the
    elapsed time in seconds.

    ``word_statistics`` is accepted for signature parity with the other
    tokenizer runners but is currently unused.
    """
    print("开始snowNLP分词")

    time_start = time.time()

    # Read the whole corpus at once; SnowNLP segments the full string.
    with open(word_input, encoding="utf-8") as source:
        text = source.read()

    # Context manager guarantees the output handle is flushed and closed
    # (the original handle was opened with a bare open() and never closed).
    with open(word_apart_file, 'a', encoding="utf-8") as f_outcome:
        snownlp = SnowNLP(text)
        f_outcome.write(str(snownlp.words))

        time_cost = time.time() - time_start
        f_outcome.write("耗时：\t")
        f_outcome.write(str(time_cost))

    print("snowNLP耗时:", time_cost)
    return time_cost


def statistic_jieba(word_input, word_apart_file, word_statistics):
    """Segment the text in ``word_input`` with jieba, append the
    space-joined tokens and the elapsed time to ``word_apart_file``,
    and return the elapsed time in seconds.

    ``word_statistics`` is accepted for signature parity with the other
    tokenizer runners but is currently unused.
    """
    print("开始jieba分词")
    time_start = time.time()

    with open(word_input, encoding="utf-8") as source:
        text = source.read()

    word_apart_outcome = jieba.lcut(text)

    # Context manager guarantees the output handle is flushed and closed
    # (the original handle was opened with a bare open() and never closed).
    with open(word_apart_file, 'a', encoding="utf-8") as result:
        result.write(' '.join(word_apart_outcome))

        time_cost = time.time() - time_start
        result.write("\n\n\n\n耗时：\t")
        result.write(str(time_cost))

    print("jieba耗时:", time_cost)

    return time_cost

def statistic_thulac(word_input, word_apart_file, word_statistics):
    """Segment the text in ``word_input`` with THULAC (segmentation only,
    no POS tagging), append the result and the elapsed time to
    ``word_apart_file``, and return the elapsed time in seconds.

    ``word_statistics`` is accepted for signature parity with the other
    tokenizer runners but is currently unused.

    NOTE: the timing deliberately includes THULAC model construction,
    matching the original measurement.
    """
    print("开始thulac分词")
    time_start = time.time()

    with open(word_input, encoding="utf-8") as source:
        text = source.read()

    thu = thulac.thulac(seg_only=True)
    # text=True makes cut() return one space-separated string.
    text_outcome = thu.cut(text, text=True)

    # Context manager guarantees the output handle is flushed and closed
    # (the original handle was opened with a bare open() and never closed).
    with open(word_apart_file, 'a', encoding="utf-8") as result:
        result.write(text_outcome)

        time_cost = time.time() - time_start
        result.write("\n\n\n\n耗时：\t")
        result.write(str(time_cost))

    print("thulac耗时:", time_cost)

    return time_cost


if __name__ == '__main__':
    # Run each tokenizer over the same corpus and collect wall-clock cost.
    time_jieba = statistic_jieba(word_relate, word_apart_jieba, word_static)
    time_thulac = statistic_thulac(word_relate, word_apart_thulac, word_static)
    time_snowNLP = statistic_snowNLP(word_relate, word_apart_snowNLP, word_static)

    result_time_cost = {
        "time_jieba": time_jieba,
        "time_thulac": time_thulac,
        "time_snowNLP": time_snowNLP
    }

    # Context manager ensures the result file is flushed and closed
    # (the original handle was never closed, risking a truncated JSON).
    with open("../result/apart.json", 'w', encoding="utf-8") as result_file:
        json.dump(result_time_cost, result_file, indent=4)
