import torch

from dataset.vocab import Vocab, read_tokens, read_txt, tokenizer
# from vocab import Vocab, read_tokens, read_txt, tokenizer  # alternative import path, depending on the caller's working directory
import torch.nn.functional as F
from torch.utils.data import Dataset, DataLoader


def _generate_data(vocab, tokens, seq_len=30):
    """Build (input, target) tensors for next-token language modeling.

    Flattens the tokenized corpus into one sequence of vocabulary ids,
    then slides a window of length ``seq_len`` over it; the target
    window is the input window shifted right by one position.

    Args:
        vocab: vocabulary object exposing ``to_idx(token) -> int``.
        tokens: raw token structure accepted by ``read_tokens``.
        seq_len: window length. Defaults to 30, matching the previous
            hard-coded value, so existing callers are unaffected.

    Returns:
        Tuple ``(inputs, outputs)`` of tensors, each of shape
        ``(num_windows, seq_len)`` where
        ``num_windows = max(0, len(ids) - seq_len)``.
    """
    tokens = read_tokens(tokens)
    # Flatten the per-line token lists into a single id sequence.
    token_ids = [vocab.to_idx(token) for line in tokens for token in line]

    # Sliding windows: outputs[i] is inputs[i] shifted right by one token.
    num_windows = len(token_ids) - seq_len
    inputs = torch.tensor(
        [token_ids[i:i + seq_len] for i in range(num_windows)]
    )
    outputs = torch.tensor(
        [token_ids[i + 1:i + 1 + seq_len] for i in range(num_windows)]
    )
    return inputs, outputs


class RNNDataset(Dataset):
    """Map-style dataset over pre-built (input, target) sequence pairs.

    Item ``i`` is the tuple ``(inputs[i], outputs[i])``; the two
    containers are expected to be index-parallel.
    """

    def __init__(self, inputs, outputs):
        """Store the parallel input and target containers as-is."""
        super().__init__()
        self.inputs = inputs
        self.outputs = outputs

    def __len__(self):
        """Number of (input, target) pairs available."""
        return len(self.inputs)

    def __getitem__(self, index):
        """Return the (input, target) pair at ``index``."""
        sample = self.inputs[index]
        target = self.outputs[index]
        return sample, target


def generate_loader_vocab(batch_size=10):
    """Build a shuffled training DataLoader and its vocabulary.

    Reads the corpus, tokenizes it, builds a ``Vocab`` from the tokens,
    and wraps the sliding-window (input, target) tensors in a
    ``DataLoader``.

    Args:
        batch_size: samples per batch; an incomplete final batch is
            dropped (``drop_last=True``).

    Returns:
        Tuple ``(loader, vocab)``.
    """
    corpus_tokens = tokenizer(read_txt())
    vocabulary = Vocab(corpus_tokens)
    x, y = _generate_data(vocabulary, corpus_tokens)
    loader = DataLoader(
        RNNDataset(x, y),
        batch_size=batch_size,
        shuffle=True,
        drop_last=True,
    )
    return loader, vocabulary
