"""
https://github.com/SophonPlus/ChineseNlpCorpus
https://github.com/goto456/stopwords
"""
import logging

import jieba
from tqdm import tqdm

jieba.setLogLevel(logging.INFO)

data_path = "sources/weibo_senti_100k.csv"
data_stop_path = "sources/hit_stopwords.txt"

# Read the labeled corpus; the first line is the CSV header, skip it.
with open(data_path, encoding="utf-8") as f:
    data_list = f.readlines()[1:]

# Load the HIT stopword list, one entry per line.
with open(data_stop_path, encoding="utf-8") as f:
    stops_word = [line.strip() for line in f]

# Extra punctuation / control characters to drop during segmentation.
# NOTE: the original list used regex-style escapes such as '\(' and '\.',
# which in Python are literal backslash+char strings and therefore never
# matched the punctuation tokens jieba emits — plain characters are used here.
filters = ['!', '"', '#', '$', '%', '&', '(', ')', '*', '+', ',', '-', '.',
           '/', ':', ';', '<', '=', '>', '?', '@', '[', '\\', ']', '^', '_',
           '`', '{', '|', '}', '~', '\t', '\n', '\x97', '\x96', '”', '“']
stops_word.extend(filters)
# Set gives O(1) membership tests in the per-token loop below.
stops_word = set(stops_word)

min_seq = 2  # drop words occurring fewer than this many times (noise / compute reduction)
top_n = 1000  # maximum vocabulary size

# Special vocabulary tokens appended after all real words.
UNK = "<UNK>"  # placeholder index for out-of-vocabulary words
PAD = "<PAD>"  # placeholder index used to pad sequences to equal length

# Count word frequencies over the whole corpus.
voc_dict = {}
stop_set = set(stops_word)  # O(1) membership tests inside the token loop
for item in tqdm(data_list):
    # Each row is "<label>,<text>": item[0] is the label digit, item[2:] the text.
    content = item[2:].strip()
    for seg_item in jieba.cut(content, cut_all=False):
        if seg_item in stop_set:
            continue
        voc_dict[seg_item] = voc_dict.get(seg_item, 0) + 1

# Keep only words seen at least min_seq times (min_seq's comment says words
# *below* the threshold are dropped, so the comparison is >=, not >), sort by
# descending frequency, and cap the vocabulary at top_n entries — top_n was
# previously declared as the dictionary capacity but never applied.
voc_list = sorted(
    (wc for wc in voc_dict.items() if wc[1] >= min_seq),
    key=lambda x: x[1],
    reverse=True,
)[:top_n]

# Map each word to a dense index; UNK and PAD get the next two DISTINCT ids.
# (The original dict-literal update evaluated len(voc_dict) twice before the
# update executed, so UNK and PAD ended up sharing the same index.)
voc_dict = {word: idx for idx, (word, _count) in enumerate(voc_list)}
voc_dict[UNK] = len(voc_dict)
voc_dict[PAD] = len(voc_dict)

# Persist the vocabulary as "word,index" lines; explicit UTF-8 so Chinese
# words round-trip regardless of the platform's default encoding, and a
# context manager so the file is always flushed and closed.
with open("sources/dict", "w", encoding="utf-8") as ff:
    for word, idx in voc_dict.items():
        ff.write("{},{}\n".format(word, idx))
