import pandas as pd
import torch
from torch.nn.utils.rnn import pad_sequence
from torch.utils.data import Dataset
from transformers import BertTokenizer


class myData(Dataset):
    """Dataset pairing corrupted ('random_text') and original ('origin_text')
    Chinese sentences with per-character 0/1 labels.

    Reads ``data/merge.csv`` (GBK-encoded) with columns ``random_text``,
    ``origin_text`` and ``label`` (space-separated 0/1, one per character).
    Each item is tokenized character-by-character and wrapped in
    ``[CLS]``/``[SEP]``.
    """

    def __init__(self):
        super().__init__()
        # NOTE(review): CSV assumed GBK-encoded — confirm against the file producer.
        self.data = pd.read_csv("data/merge.csv", encoding='gbk')
        self.tokenizer = BertTokenizer.from_pretrained('chinese-bert-wwm-ext')

    def __len__(self):
        # Number of rows in the CSV.
        return self.data.shape[0]

    def __getitem__(self, item):
        """Return (input_ids, output_ids, label, mask) tensors for row *item*.

        All four sequences share one length: len(text) + 2, for the
        [CLS]/[SEP] bookends. ``label`` and ``mask`` are float tensors.
        """
        row = self.data.iloc[item]
        input_txt = row['random_text']
        output_txt = row['origin_text']
        # Pad the label with 0 at both ends for the [CLS]/[SEP] positions.
        label = [0] + [int(x) for x in row['label'].strip().split()] + [0]
        input_ids = self.tokenizer.convert_tokens_to_ids(['[CLS]'] + list(input_txt) + ['[SEP]'])
        output_ids = self.tokenizer.convert_tokens_to_ids(['[CLS]'] + list(output_txt) + ['[SEP]'])
        # BUG FIX: the mask must cover the full id sequence including the
        # [CLS]/[SEP] tokens. The original used len(input_txt), leaving the
        # mask two elements shorter than input_ids and label, so after
        # padding in collect_fn the last two real positions were masked out.
        mask = [1] * len(input_ids)
        return (torch.tensor(input_ids), torch.tensor(output_ids),
                torch.tensor(label).float(), torch.tensor(mask).float())


def collect_fn(batch_data):
    """Collate a list of (input_ids, output_ids, label, mask) samples.

    Transposes the batch and zero-pads every field to the longest sample,
    returning four batch-first tensors in the same field order.
    """
    fields = zip(*batch_data)  # (inputs, outputs, labels, masks)
    return tuple(pad_sequence(seqs, batch_first=True) for seqs in fields)
