import torch
import numpy as np


# todo use torch Dataset API
def gen_data_loader(data_file, tokenizer, vocab_dict, vocab_size, batch_size):
    """Yield length-sorted batches of (token_tensor, length) pairs from a text file.

    Each line of ``data_file`` is tokenized and mapped to vocabulary indices.
    Three sentinel ids are carved out of the top of the vocabulary:

    - ``vocab_size - 3``: BOS, prepended to every input sequence
    - ``vocab_size - 2``: UNK, substituted for out-of-vocabulary tokens
    - ``vocab_size - 1``: EOS, appended to every target sequence

    Args:
        data_file: path to a text file, one example per line.
        tokenizer: callable mapping one line (str) to a list of tokens.
        vocab_dict: mapping from token to vocabulary index.
        vocab_size: total vocabulary size, including the three sentinels.
        batch_size: number of examples per batch; the final batch may be
            shorter, and is skipped entirely when it would be empty.

    Yields:
        ``(batch, target_batch)`` tuples. Each element is a list of
        ``(torch.LongTensor, int)`` pairs sorted by sequence length,
        longest first (suitable for ``pack_padded_sequence``). Targets
        are the inputs shifted left by one position with EOS appended.
    """

    def _length_sorted(batch, target_batch):
        # Sort both lists by sequence length, longest first. np.argsort is
        # kept (rather than sorted()) to preserve the original tie ordering.
        order = np.argsort([length for _, length in batch])[::-1]
        return [batch[i] for i in order], [target_batch[i] for i in order]

    batch = []
    target_batch = []

    # Context manager guarantees the handle is closed even if the caller
    # abandons the generator early (the original could leak it).
    with open(data_file) as fd:
        for line in fd:
            tokens = tokenizer(line)

            # BOS followed by token ids; unknown tokens map to UNK.
            indices = [vocab_size - 3] + [vocab_dict.get(t, vocab_size - 2) for t in tokens]
            # Target = input shifted left by one, terminated with EOS.
            target_indices = indices[1:] + [vocab_size - 1]

            batch.append((torch.LongTensor(indices), len(indices)))
            target_batch.append((torch.LongTensor(target_indices), len(target_indices)))

            # Fix: the original's extra `idx != 0` guard broke batch_size == 1
            # (the first example could never be emitted on time).
            if len(batch) == batch_size:
                yield _length_sorted(batch, target_batch)
                batch = []
                target_batch = []

        # Emit the final, possibly short batch — but never an empty one
        # (the original yielded ([], []) when line count % batch_size == 0).
        if batch:
            yield _length_sorted(batch, target_batch)

