from config import *
import json
from util import *
from collections import Counter

def generate_vocab():
    en_vocab = ['<pad>','<unk>','<sos>','<eos>']
    zh_vocab = ['<pad>','<unk>','<sos>','<eos>']

    zh_vocab_list = []
    en_vocab_list = []

    #解析json文件
    with open(TRAIN_SAMPLE_PATH,encoding='utf-8') as file:
        lines = json.loads(file.read())
        for en_sent,zh_sent in lines:
            zh_vocab_list += divided_zh(zh_sent)
            en_vocab_list += divided_en(en_sent)
            # print(zh_vocab_list)
            # print(en_vocab_list)
        #过滤生僻词
        zh_vocab_kv = Counter(zh_vocab_list).most_common()
        # print(zh_vocab_kv)
        zh_vocab += [k.lower() for k,v in zh_vocab_kv]# 将中文词汇表中的每个词汇转换为小写并添加到zh_vocab列表中

        en_vocab_kv = Counter(en_vocab_list).most_common()
        en_vocab += [k.lower() for k, v in en_vocab_kv]

        # print(zh_vocab)
        # print(en_vocab)

        #生成词表文件
        with open(ZH_VOCAB_PATH,'w',encoding='utf-8') as file:
            file.write('\n'.join(zh_vocab))
        with open(EN_VOCAB_PATH,'w',encoding='utf-8') as file:
            file.write('\n'.join(en_vocab))

# Script entry point: regenerate both vocabulary files when run directly.
if __name__ == '__main__':
    generate_vocab()
