from utils.dict_saver_loader import *
import math
# Word-frequency dictionary persisted to disk: token -> occurrence count.
# Built elsewhere and loaded via utils.dict_saver_loader.load_dict.
merge_dict_dir = '../../hard_drive/entropy/dict/merge.txt'
merge_dict = load_dict(merge_dict_dir)

# Sanity check: total token count and vocabulary size.
# sum() consumes the values view directly — no need to copy into a list first.
print(sum(merge_dict.values()))
# len(d) is the idiomatic (and equivalent) form of len(d.keys()).
print(len(merge_dict))
def calc_entropy(freq_dict=None, total=None):
    """Compute the Shannon entropy (in bits) of a frequency distribution.

    Entropy is sum(-p * log2(p)) over all entries, where p = count / total.

    Args:
        freq_dict: mapping of key -> occurrence count. Defaults to the
            module-level ``merge_dict`` loaded at import time.
        total: total number of occurrences. Defaults to the sum of the
            dict's values. (The original code hard-coded 1652753838, the
            corpus total noted at the bottom of this file; computing it
            keeps the result correct if the dictionary changes.)

    Returns:
        The entropy as a float (also printed, preserving the original
        script's output).
    """
    if freq_dict is None:
        freq_dict = merge_dict
    if total is None:
        total = sum(freq_dict.values())
    entropy_ret = 0.0
    # enumerate from 1 so the progress counter matches items processed.
    for processed, count in enumerate(freq_dict.values(), 1):
        p = count / total
        entropy_ret -= p * math.log2(p)
        if processed % 10000 == 0:
            print(processed, 'processed')
    print(entropy_ret)
    return entropy_ret


# Script entry point: compute and print the corpus entropy.
if __name__=='__main__':
    calc_entropy()

'''
In the news2016zh_train.json file, there are in total 1,652,753,838
(~1.65 billion) words and 4,760,316 distinct words (including symbols and
numbers), segmented by jieba.py. The final entropy result is 11.875 using
sigma(-p*log2(p)).
'''