import torch
import numpy as np
from transformers import AutoTokenizer, AutoModelForPreTraining
from transformers import BertTokenizer, BigBirdModel, BertForSequenceClassification

import model.config as conf


class Dataset:
    """Map-style dataset yielding BERT-ready tensors for text classification.

    Each element of ``data`` is a ``(content, label)`` pair: ``content`` is a
    raw text string (presumably Chinese, given the '。' handling — confirm
    against the caller) and ``label`` must be convertible to ``int``.

    ``__getitem__`` returns ``(token_ids, attention_mask, label)`` as
    ``torch.long`` tensors, each id/mask sequence exactly
    ``conf.max_length`` long.
    """

    def __init__(self, data):
        self.data = data
        self.tokenizer = BertTokenizer.from_pretrained(conf.model_name_or_path)

    def __len__(self):
        return len(self.data)

    def __getitem__(self, item):
        content, label = self.data[item]

        tokens = self.tokenizer.tokenize(content)
        # Treat the Chinese full stop as a sentence separator inside the text.
        tokens = ['[SEP]' if t == '。' else t for t in tokens]

        # Reserve two slots for the wrapping [CLS] ... [SEP] tokens.
        if len(tokens) > conf.max_length - 2:
            tokens = tokens[:conf.max_length - 2]
        tokens = ['[CLS]'] + tokens + ['[SEP]']

        # BUG FIX: the original padded with the literal string 'PAD', which
        # convert_tokens_to_ids maps to [UNK] (id 100), not the pad id 0.
        # Pad the id sequence directly with the tokenizer's real pad id.
        pad_len = conf.max_length - len(tokens)
        token_id = self.tokenizer.convert_tokens_to_ids(tokens)
        token_id = token_id + [self.tokenizer.pad_token_id] * pad_len
        atten_mask = [1] * len(tokens) + [0] * pad_len

        return (torch.tensor(token_id, dtype=torch.long),
                torch.tensor(atten_mask, dtype=torch.long),
                torch.tensor(int(label), dtype=torch.long))
