from pathlib import Path

import numpy as np
import torch
import json
import pandas as pd
import nltk
from nltk.tokenize import word_tokenize
import jieba


def pre_process(file_dir='D:/DATASET/translation2019zh/train/'):
    """Tokenize a JSONL English/Chinese parallel corpus and build vocabularies.

    Reads ``translation2019zh_train.json`` from *file_dir* — one JSON object
    per line with ``'english'`` and ``'chinese'`` keys — and writes four files
    back into the same directory:

    - ``tokenized_english.json`` / ``tokenized_chinese.json``: one
      ``{'src': [tokens...]}`` object per line (JSONL),
    - ``english_tokens.json`` / ``chinese_tokens.json``: ``{token: index}``
      vocabularies, indices assigned in first-seen order.

    Parameters
    ----------
    file_dir : str
        Directory holding the source corpus; outputs are written there too.
        Defaults to the original hard-coded dataset location.
    """
    base = Path(file_dir)
    src_datapath = base / 'translation2019zh_train.json'

    tokenized_english_txt = base / 'tokenized_english.json'
    tokenized_chinese_txt = base / 'tokenized_chinese.json'
    english_token = base / 'english_tokens.json'
    chinese_token = base / 'chinese_tokens.json'

    # token -> first-seen index; len(dict) is the next free index, so no
    # separate counters are needed
    e_token_dict = {}
    c_token_dict = {}

    # Context managers guarantee all five handles are closed (and flushed)
    # even if a line fails to parse — the original never closed them.
    with open(src_datapath, 'r', encoding='utf-8') as src_file, \
         open(tokenized_english_txt, 'w', encoding='utf-8') as eng_tokenized_file, \
         open(tokenized_chinese_txt, 'w', encoding='utf-8') as chn_tokenized_file, \
         open(english_token, 'w', encoding='utf-8') as eng_tokens, \
         open(chinese_token, 'w', encoding='utf-8') as chn_tokens:

        for line in src_file:
            item = json.loads(line)

            # tokenize the English sentence with NLTK
            eng_sentence = word_tokenize(item['english'])
            # one JSON object per line; the original omitted the newline,
            # concatenating objects into a single unparseable line
            json.dump({'src': eng_sentence}, eng_tokenized_file)
            eng_tokenized_file.write('\n')

            # assign the next free index to every previously unseen token
            for token in eng_sentence:
                if token not in e_token_dict:
                    e_token_dict[token] = len(e_token_dict)
            print('finished one english sentence, current token num is {}'.format(len(e_token_dict)))

            # same as above, except using the jieba module instead
            chn_sentence = jieba.lcut(item['chinese'])
            # ensure_ascii=False keeps the Chinese text human-readable
            json.dump({'src': chn_sentence}, chn_tokenized_file, ensure_ascii=False)
            chn_tokenized_file.write('\n')
            for token in chn_sentence:
                if token not in c_token_dict:
                    c_token_dict[token] = len(c_token_dict)
            print('finished one chinese sentence, current token num is {}'.format(len(c_token_dict)))

        json.dump(e_token_dict, eng_tokens, indent=4)
        json.dump(c_token_dict, chn_tokens, ensure_ascii=False, indent=4)


if __name__ == '__main__':
    # One-time NLTK setup: uncomment on the first run to download the
    # 'punkt' tokenizer models that word_tokenize() requires.
    # nltk.download('punkt')

    # Quick jieba smoke test on a sample Chinese sentence, kept for debugging:
    # sen = '为什么会这样？我是铸币'
    # res = jieba.lcut(sen)
    # print(res)

    pre_process()
