import torch
from torch.utils.data import DataLoader, Dataset
import numpy as np
import seq2seq.config as config


class NumDataset(Dataset):
    """Toy dataset of random integers for seq2seq digit-copy training.

    Each sample is a tuple ``(digits, label, input_length, label_length)``
    where ``digits`` is the list of digit characters of a random number and
    ``label`` is the same digits with a trailing "0" appended.
    """

    def __init__(self):
        # Fixed seed makes the generated data reproducible across runs.
        # NOTE(review): injecting the data (or the seed) from outside would
        # be cleaner than hard-coding it here.
        np.random.seed(10)
        self.data = np.random.randint(1, 1e8, size=[500000])

    def __getitem__(self, index):
        # Split the number into its individual digit characters.
        # (Renamed from `input`, which shadowed the builtin.)
        digits = list(str(self.data[index]))
        label = digits + ["0"]  # target = input digits plus a trailing "0"
        return digits, label, len(digits), len(label)

    def __len__(self):
        return self.data.shape[0]


def collate_fn(batch):
    """Collate (input, label, input_length, label_length) samples into
    padded LongTensors moved to ``config.device``.

    Samples are sorted longest-first by label length; since each label is
    the input with one extra trailing "0", this also sorts by input length,
    which packed-sequence utilities downstream typically require.
    """
    # Sort descending by label length (index 3 of each sample tuple).
    batch = sorted(batch, key=lambda sample: sample[3], reverse=True)
    # Renamed from `input` (shadowed the builtin) / `target`.
    inputs, targets, input_lengths, target_lengths = zip(*batch)
    # Encode/pad the digit sequences; targets get one extra slot (plus EOS)
    # because each label carries the appended trailing "0".
    inputs = [config.num_sequence.transform(seq, max_len=10) for seq in inputs]
    targets = [config.num_sequence.transform(seq, max_len=11, add_eos=True) for seq in targets]
    device = config.device
    return (
        torch.LongTensor(inputs).to(device),
        torch.LongTensor(targets).to(device),
        torch.LongTensor(input_lengths).to(device),
        torch.LongTensor(target_lengths).to(device),
    )


# Shared training DataLoader; batches are encoded/padded (and moved to the
# configured device) by collate_fn above.
train_dataloader = DataLoader(NumDataset(), batch_size=config.train_batch_size, shuffle=True, collate_fn=collate_fn)

if __name__ == '__main__':
    # Smoke test: pull a single batch and print its four tensors.
    # (Loop variables renamed from `input`/`label`/... — `input` shadowed
    # the builtin; stale commented-out debug code removed.)
    for batch_input, batch_label, batch_input_len, batch_label_len in train_dataloader:
        print(batch_input)
        print(batch_label)
        print(batch_input_len)
        print(batch_label_len)
        break
