import ast
import os

from dataloaer import MyCustomDataset, dateset2loader
from readdata import readfile
from vocab import tokenizer, Vocab, mapping

# Load the parallel English/Chinese sentence pairs and tokenize each side.
en_sentence_list, ch_sentence_list = readfile()
en_token_sentence_list = tokenizer(en_sentence_list)
# ch=True switches the tokenizer into Chinese mode — presumably a different
# segmentation strategy; confirm against vocab.tokenizer.
ch_token_sentence_list = tokenizer(ch_sentence_list, ch=True)

# Build the vocabularies on the first run and cache them to disk; on later
# runs reload them so token-to-index assignments stay stable across runs.
if os.path.exists("./vocab/vocab_en.txt") and os.path.exists("./vocab/vocab_ch.txt"):
    # Both cache files present: restore each Vocab from its saved dict repr.
    # literal_eval safely parses the str(dict) written below.
    with open("./vocab/vocab_en.txt", "r", encoding="utf8") as f:
        vocab_en = Vocab(ast.literal_eval(f.readline()), init=False)
    with open("./vocab/vocab_ch.txt", "r", encoding="utf8") as f:
        vocab_ch = Vocab(ast.literal_eval(f.readline()), init=False)
else:
    # BUG FIX: the original condition tested vocab_en.txt twice, so a missing
    # vocab_ch.txt still took the load branch and crashed on open(). Rebuild
    # both vocabularies whenever either cache file is absent.
    os.makedirs("./vocab", exist_ok=True)  # ensure the cache directory exists
    vocab_en = Vocab(en_token_sentence_list, init=True)
    vocab_ch = Vocab(ch_token_sentence_list, init=True)
    with open("./vocab/vocab_en.txt", "w", encoding="utf8") as f:
        f.write(str(vocab_en.vocab))
    with open("./vocab/vocab_ch.txt", "w", encoding="utf8") as f:
        f.write(str(vocab_ch.vocab))

# Source (English) and target (Chinese) vocabulary sizes, used downstream
# to size the model's embedding / output layers.
vocab_input_len = len(vocab_en)
vocab_target_len = len(vocab_ch)
print("vocab_input_len:", vocab_input_len)
print("vocab_target_len:", vocab_target_len)
# Map every tokenized sentence to its index vector. Comprehensions replace
# the original manual append loops (same order, same results).
vector_en_list = [mapping(sentence, vocab_en) for sentence in en_token_sentence_list]
vector_ch_list = [mapping(sentence, vocab_ch) for sentence in ch_token_sentence_list]
# Pair source/target vectors into one dataset, then build the batched loader
# padded to the longest sentence on each side.
trainSet = MyCustomDataset(list(zip(vector_en_list, vector_ch_list)))
max_len_en, max_len_ch = trainSet.get_sentenceMaxLen()
print("max_len_en, max_len_ch:", max_len_en, max_len_ch)
trainDataLoader = dateset2loader(trainSet, max_len_en, max_len_ch)

# Sanity check: pull a single batch and show the first sample's index
# vectors alongside their decoded token sequences (reverse mapping).
en_text, ch_text = next(iter(trainDataLoader))
print(en_text[0])
print(ch_text[0])
print(mapping(en_text[0], vocab_en, reverse=True))
print(mapping(ch_text[0], vocab_ch, reverse=True))
