
import torch
import pandas as pd
from transformers import BertTokenizer
from torch.utils.data import Dataset, DataLoader
from torch.nn.utils.rnn import pad_sequence


class myData(Dataset):
    """Character-level dataset for Chinese spelling correction.

    Each CSV row supplies a noisy sentence ('random_text'), its corrected
    counterpart ('origin_text'), and a space-separated 0/1 error tag per
    character ('label').
    """

    def __init__(self, config):
        """Load the CSV and the pretrained BERT tokenizer.

        Args:
            config: object exposing `datapath` (CSV path, GBK-encoded) and
                `vocab_file` (BERT vocab/tokenizer location).
        """
        super(myData, self).__init__()
        # GBK encoding: the corpus is a Chinese CSV file.
        self.data = pd.read_csv(config.datapath, encoding='gbk')
        # The pretrained tokenizer already defines [CLS]/[SEP]/[UNK] etc.
        self.tokenizer = BertTokenizer.from_pretrained(config.vocab_file)

    def __len__(self):
        """Return the number of samples (one per CSV row)."""
        return self.data.shape[0]

    def __getitem__(self, item):
        """Build one training sample.

        Returns:
            (input_ids, output_ids, label, mask) as 1-D tensors; label and
            mask are float tensors. The leading/trailing 0 in `label` covers
            the [CLS]/[SEP] positions.
        """
        row = self.data.iloc[item]
        noisy = row['random_text']   # input text containing typos
        clean = row['origin_text']   # fully corrected target text
        tags = [0] + [int(t) for t in row['label'].strip().split()] + [0]

        # Tokenize character by character, wrapped with the special tokens.
        src_ids = self.tokenizer.convert_tokens_to_ids(['[CLS]'] + list(noisy) + ['[SEP]'])
        tgt_ids = self.tokenizer.convert_tokens_to_ids(['[CLS]'] + list(clean) + ['[SEP]'])

        # Attention mask: every position holds a real token at this stage.
        attn = [1] * len(src_ids)

        return (torch.tensor(src_ids),
                torch.tensor(tgt_ids),
                torch.tensor(tags).float(),
                torch.tensor(attn).float())



def collect_fn(batch_data):
    """Collate (input_ids, output_ids, label, mask) samples into a batch.

    Each field is zero-padded to the longest sequence in the batch, with
    the batch dimension first.

    Args:
        batch_data: list of 4-tuples of 1-D tensors, as produced by myData.

    Returns:
        Four 2-D tensors: (batch_inp_ids, batch_out_ids, batch_label,
        batch_mask).
    """
    # Transpose the batch: one sequence-list per field.
    inp_seqs, out_seqs, label_seqs, mask_seqs = zip(*batch_data)

    # pad_sequence pads with 0 up to the max length in the batch.
    padded_inp = pad_sequence(list(inp_seqs), batch_first=True)
    padded_out = pad_sequence(list(out_seqs), batch_first=True)
    padded_label = pad_sequence(list(label_seqs), batch_first=True)
    padded_mask = pad_sequence(list(mask_seqs), batch_first=True)

    return padded_inp, padded_out, padded_label, padded_mask


if __name__ == '__main__':
    from config import Config

    # Smoke test: build the dataset and print the first collated batch.
    cfg = Config()
    loader = DataLoader(dataset=myData(cfg), batch_size=2, collate_fn=collect_fn)
    for batch in loader:
        print(batch)
        break
