import jieba
import time
import multiprocessing
from utils.dict_saver_loader import *
import os


def mul_thread_unigram_fenci(text_dir='../../hard_drive/entropy/news2016zh_validft.txt',
                             num_threads=8,
                             dicts_dir='../../hard_drive/entropy/unigram'):
    """Segment a corpus into unigrams with jieba across worker processes.

    The corpus is read fully into memory, split into `num_threads` contiguous
    chunks, and each worker process counts word frequencies for its chunk and
    saves them (via `save_dict`) to `<dicts_dir>/<worker_No>.txt`.

    Args:
        text_dir: path to the UTF-8 corpus file, one document per line.
        num_threads: number of worker processes to fan out.
        dicts_dir: output directory; one frequency-dict file per worker.
    """
    # exist_ok avoids FileExistsError when the directory is left over
    # from a previous run (plain os.mkdir crashed on the second run)
    os.makedirs(dicts_dir, exist_ok=True)

    # NOTE(review): a nested function as a Process target only works with the
    # 'fork' start method (Linux default); on Windows/macOS 'spawn' it cannot
    # be pickled — confirm the deployment platform.
    def readlines_and_count(start_line, end_line, thread_No, lines):
        """Count unigram frequencies in lines[start_line:end_line] and save."""
        start = time.time()
        word_show_time_dict = {}
        for i in range(start_line, end_line):
            for word in jieba.cut(lines[i], cut_all=False):
                # dict.get with default replaces the membership-test branch
                word_show_time_dict[word] = word_show_time_dict.get(word, 0) + 1
            if i % 1000 == 0:
                print(thread_No, 'thread, ', i // 1000, 'k lines processed')
        dict_txt_dir = dicts_dir + '/' + str(thread_No) + '.txt'
        save_dict(word_show_time_dict, dict_txt_dir)
        end = time.time()
        print('Save done!', thread_No, 'thread time use: ', end - start)

    # 'with' guarantees the handle is closed (the original leaked it)
    with open(text_dir, 'r', encoding='utf-8') as f:
        lines = f.readlines()
    num_lines = len(lines)
    num_lines_every_thread = num_lines // num_threads

    procs = []
    for i in range(num_threads):
        start_line = i * num_lines_every_thread
        # the last worker absorbs the remainder; the original floor-division
        # split silently dropped the trailing num_lines % num_threads lines
        end_line = num_lines if i == num_threads - 1 else (i + 1) * num_lines_every_thread
        procs.append(multiprocessing.Process(
            target=readlines_and_count,
            args=(start_line, end_line, i, lines)))

    for p in procs:
        p.start()
    # wait for every worker so callers can rely on all dict files existing
    for p in procs:
        p.join()


# Entry-point guard: required with multiprocessing so that worker processes
# importing this module do not recursively launch the whole job again.
if __name__=='__main__':
    mul_thread_unigram_fenci()

