import torch

from tqdm import tqdm
from torch.utils.data.dataset import Dataset
from pytorch_pretrained_bert import BertTokenizer

class SeqDataset(Dataset):
    """Dataset of BERT-tokenized text sequences with integer labels.

    Each line of the input file is expected to look like ``"<label> <text>"``
    (label first, separated from the text by the first space). The text is
    tokenized with a BERT tokenizer, then padded with ``[PAD]`` or truncated
    to exactly ``config.pad_size`` token ids.
    """

    def __init__(self, path, config):
        """Eagerly load and tokenize the whole file into two tensors.

        Args:
            path: Path to a UTF-8 text file, one sample per line.
            config: Object providing ``bert_path`` (pretrained tokenizer
                location) and ``pad_size`` (fixed sequence length).
        """
        super(SeqDataset, self).__init__()

        tokenizer = BertTokenizer.from_pretrained(config.bert_path)

        with open(path, 'r', encoding='utf-8') as file:
            data = file.read().strip().split('\n')

        token_ids = list()
        labels = list()
        for sample in tqdm(data):
            # maxsplit=1 keeps spaces inside the text intact; the original
            # split(' ') raised ValueError on any multi-word text.
            l, t = sample.split(' ', 1)

            seq_token = tokenizer.tokenize(t)
            if len(seq_token) > config.pad_size:
                # Truncate over-long sequences to the fixed length.
                seq_token = seq_token[:config.pad_size]
            else:
                # Pad short sequences up to the fixed length.
                seq_token += ["[PAD]"] * (config.pad_size - len(seq_token))

            token_ids.append(tokenizer.convert_tokens_to_ids(seq_token))
            # int() instead of eval(): labels are plain integers, and
            # eval() on file contents is an arbitrary-code-execution hazard.
            labels.append(int(l))

        # X: (num_samples, pad_size) token ids; Y: (num_samples,) labels.
        self.X = torch.tensor(token_ids, dtype=torch.long)
        self.Y = torch.tensor(labels, dtype=torch.long)

    def __getitem__(self, item):
        """Return the ``(token_ids, label)`` tensor pair at index *item*."""
        return self.X[item], self.Y[item]

    def __len__(self):
        """Return the number of samples in the dataset."""
        return len(self.X)