import random
import torch
import d2l
from chinese_split_word_ictclas_custom import load_corpus_time_machine


class RnnClassficationDataset(torch.utils.data.Dataset):
    """Fixed-length padded sequence dataset for RNN text classification.

    Wraps the corpus returned by ``load_corpus_time_machine``. Each item is a
    zero-padded vector of token indices (length ``max_length``) together with
    its label tensor.
    """

    def __init__(self, max_length_add=10):
        # Each record in `contents` is indexed as: record[0] = tokens,
        # record[3] = label vector (assumption inferred from usage below —
        # TODO confirm against load_corpus_time_machine).
        _, self.vocab, self.contents = load_corpus_time_machine()
        self.label_size = len(self.contents[0][3])
        self.max_length_add = max_length_add
        # Longest tokenized sample in the corpus (0 for an empty corpus),
        # plus `max_length_add` head-room so every padded vector shares one
        # fixed length.
        longest = max(
            (len(self.vocab[record[0]]) for record in self.contents),
            default=0)
        self.max_length = longest + max_length_add

    def __getitem__(self, index):
        """Return ``(padded_vector, label)`` for the sample at ``index``."""
        data = self.contents[index]
        vector = torch.LongTensor(self.vocab[data[0]])
        pad_length = self.max_length - len(vector)
        # Pad with zeros of the SAME dtype: torch.zeros() defaults to
        # float32, and concatenating float padding onto a LongTensor either
        # raises or silently promotes the result to float, which breaks
        # integer-index consumers such as embedding layers.
        padded_vector = torch.cat(
            (vector, torch.zeros(pad_length, dtype=torch.long)))
        label = torch.LongTensor(data[3])
        return padded_vector, label

    def __len__(self):
        """Number of samples in the corpus."""
        return len(self.contents)


# --- Data-pipeline setup for the classification experiment ---
batch_size = 128

# Reserve 5 extra padding positions beyond the longest sample.
dataset = RnnClassficationDataset(5)

data_iter = torch.utils.data.DataLoader(
    dataset,
    batch_size=batch_size,
    shuffle=True,
    drop_last=True,
)

# Expose the iterator and vocabulary under the names the training code uses.
train_iter = data_iter
vocab = dataset.vocab
print(len(vocab))