"""数据集的准备"""
from torch.utils.data import Dataset, DataLoader
import numpy as np
import torch

import config


class NumData(Dataset):
    """Toy dataset of 500k random integers in [1, 10**8).

    Each sample is the digit sequence of one integer; the target is the
    same digit sequence with a trailing '0' appended (the task is to learn
    "multiply by ten" as a sequence-to-sequence mapping).
    """

    # Fixed seed so self.data is identical across runs / instances.
    _SEED = 10
    _SIZE = 500000

    def __init__(self):
        super().__init__()
        # Use a dedicated legacy RandomState instead of np.random.seed():
        # same MT19937 stream (so the data is unchanged), but the global
        # NumPy RNG state is not clobbered as a side effect.
        rng = np.random.RandomState(self._SEED)
        # high must be an int: float bounds to randint are deprecated in NumPy.
        self.data = rng.randint(low=1, high=10**8, size=[self._SIZE])

    def __getitem__(self, index):
        """Return (input_digits, target_digits, input_length, target_length).

        input_digits:  list of single-character digit strings, e.g. ['4', '2']
        target_digits: input_digits + ['0'] (i.e. the number times ten)
        """
        digits = list(str(self.data[index]))
        target = digits + ['0']
        return digits, target, len(digits), len(target)

    def __len__(self):
        return self.data.shape[0]


def collate_fn(batch):
    """Collate a list of (input, target, input_length, target_length) samples.

    Sorts the batch by target length, longest first (equivalent to sorting by
    input length, since target_length == input_length + 1), encodes the digit
    sequences to padded index tensors via config.num_seq, and returns four
    LongTensors: input [batch, max_len], target [batch, max_len+1],
    input_length [batch], target_length [batch].
    """
    ordered = sorted(batch, key=lambda sample: sample[3], reverse=True)
    inputs, targets, input_lengths, target_lengths = zip(*ordered)

    # Encode each digit sequence to a fixed-length index list.
    encoded_inputs = [
        config.num_seq.word_to_num_transform(seq, max_len=config.max_len)
        for seq in inputs
    ]
    # Targets get one extra slot plus an EOS marker.
    encoded_targets = [
        config.num_seq.word_to_num_transform(seq, max_len=config.max_len + 1, add_eos=True)
        for seq in targets
    ]

    return (
        torch.LongTensor(encoded_inputs),
        torch.LongTensor(encoded_targets),
        torch.LongTensor(input_lengths),
        torch.LongTensor(target_lengths),
    )


def get_loader_data():
    """Build a shuffled DataLoader over NumData using the custom collate_fn."""
    dataset = NumData()
    return DataLoader(
        dataset=dataset,
        batch_size=config.batch_size,
        shuffle=True,
        collate_fn=collate_fn,
    )


if __name__ == '__main__':
    # Smoke test: fetch a single batch and print the tensor shapes.
    for batch_input, batch_target, batch_input_len, batch_target_len in get_loader_data():
        print(batch_input.size())
        print(batch_target.size())
        print("*" * 10)
        print(batch_input_len.size())
        print(batch_target_len.size())
        break




