from utils.log_helper import log_init
from utils.data_helper import LoadEnglishGermanDataset
from utils.data_helper import tokenizer_dict

if __name__ == '__main__':
    # Smoke-test driver for LoadEnglishGermanDataset: builds the dataset from the
    # Multi30k-style train files, pulls one batch, and logs the shapes of the
    # tensors and masks produced by create_mask().
    logger = log_init('test_LoadEnglishGermanDataset')
    # ========== test tokenizer ==========
    # text = 'Moon Hotel, it's very interesting.'
    # out = tokenizer_dict['en'](text)
    # logger.debug(f'{out}')

    # ========== test build vocab ==========
    # de_tokenizer = tokenizer_dict['de']
    # de_vocab = build_vocab(de_tokenizer, '../data/train.de', 1)
    # tokens = ['<unk>', '<pad>', '<bos>', '<eos>']
    # indices = de_vocab.lookup_indices(tokens)
    # logger.debug(f' {tokens} ===> {indices}')
    # PAD_IDX = de_vocab['<pad>']
    # logger.debug(f'<pad> ===> {PAD_IDX}')

    file_paths = ['../data/train.de', '../data/train.en']
    dataLoader = LoadEnglishGermanDataset(train_file_paths=file_paths, tokenizer=tokenizer_dict)
    # NOTE(review): the same file pair is reused for train/valid/test here —
    # fine for a smoke test, but not a real split.
    train_iter, valid_iter, test_iter = dataLoader.load_train_val_test_data(file_paths, file_paths, file_paths)

    logger.debug(dataLoader.PAD_IDX)
    for src, tgt in train_iter:
        # Teacher forcing: decoder input drops the last token, the training
        # target drops the first, so position i predicts token i+1.
        tgt_input = tgt[:-1, :]
        tgt_out = tgt[1:, :]

        src_mask, tgt_mask, src_padding_mask, tgt_padding_mask = dataLoader.create_mask(src, tgt_input)
        logger.debug(f'src shape = {src.shape}')
        logger.debug(f'src_padding_mask [batch_size, src_len] = {src_padding_mask.shape}')
        logger.debug(f'tgt shape = {tgt.shape}')
        # Fixed label: this mask is over the target sequence, so its second
        # dimension is tgt_len, not src_len (copy-paste error from the src line).
        logger.debug(f'tgt_padding_mask [batch_size, tgt_len] = {tgt_padding_mask.shape}')

        logger.debug(f'tgt output shape = {tgt_out.shape}')
        logger.debug(f'tgt_mask shape [tgt_len, tgt_len] = {tgt_mask.shape}')

        # Only one batch is needed to verify shapes.
        break
