import pandas as pd

from transformers import AutoTokenizer

import torch
from torch.utils.data import DataLoader, Dataset
class PreTrainDataset(Dataset):
    """Map-style dataset that tokenizes raw text examples for pre-training.

    Each item is returned as the *unpadded* tuple
    ``(input_ids, token_type_ids, attention_mask)`` so that a batch-level
    collator (see ``Collator``) can pad dynamically to the longest sequence
    in the batch.
    """

    def __init__(self, data_list, tokenizer):
        """
        Args:
            data_list: list of raw text strings to tokenize.
            tokenizer: a HuggingFace-style tokenizer exposing ``encode_plus``.
        """
        super().__init__()
        self.data_list = data_list
        self.len = len(data_list)
        self.tokenizer = tokenizer

    def __getitem__(self, index):
        example = self.data_list[index]
        data = self.tokenizer.encode_plus(example,
                                          return_token_type_ids=True,
                                          return_attention_mask=True)
        # BUGFIX: the original called self.pad(...), which does not exist on
        # this class (padding lives on Collator) and crashed with
        # AttributeError.  Return the raw lists instead; Collator.__call__
        # unpacks them with zip(*examples) and pads per batch.
        return data['input_ids'], data['token_type_ids'], data['attention_mask']

    def __len__(self):
        return self.len


class Collator:
    def __init__(self, tokenizer, max_seq_len):
        self.tokenizer = tokenizer
        self.max_seq_len = max_seq_len

    def pad(self, input_ids_list, token_type_ids_list, attention_mask_list, max_seq_len):
        # 初始化填充长度
        input_ids = torch.zeros((len(input_ids_list), max_seq_len), dtype=torch.long)
        token_type_ids = torch.zeros_like(input_ids)
        attention_mask = torch.zeros_like(input_ids)
        # 遍历获取输入
        for i in range(len(input_ids_list)):
            seq_len = len(input_ids_list[i])

            if seq_len < max_seq_len:  # 如果小于最大长度
                input_ids[i, :seq_len] = torch.tensor(input_ids_list[i], dtype=torch.long)
                token_type_ids[i, :seq_len] = torch.tensor(token_type_ids_list[i], dtype=torch.long)
                attention_mask[i, :seq_len] = torch.tensor(attention_mask_list[i], dtype=torch.long)
            else:  # 如果大于或等于
                # 最后一位加上tokenizer的特殊占位
                input_ids[i] = torch.tensor(
                    input_ids_list[i][:max_seq_len - 1] + [self.tokenizer.sep_token_id], dtype=torch.long)
                token_type_ids[i] = torch.tensor(
                    token_type_ids_list[i][:max_seq_len], dtype=torch.long)
                attention_mask[i] = torch.tensor(
                    attention_mask_list[i][:max_seq_len], dtype=torch.long)

        return input_ids, token_type_ids, attention_mask

    def __call__(self, examples):
        # 获取数据
        input_ids_list, token_type_ids_list, attention_mask_list = list(zip(*examples))
        # 求句子最大长度
        cur_seq_len = max([len(ids) for ids in input_ids_list])  # 当前数据最大长度
        max_seq_len = min(cur_seq_len, self.max_seq_len)  # 最大长度
        print()
        # 填充句子
        input_ids, token_type_ids, attention_mask = self.pad(input_ids_list, token_type_ids_list, attention_mask_list,
                                                             max_seq_len)
        # 返回结果
        data = {
            'input_ids': input_ids,
            'token_type_ids': token_type_ids,
            'attention_mask': attention_mask,
            'labels': labels,
        }
        return data


# Build tokenizer, datasets and dataloaders for pre-training.
# NOTE(review): `tokenizer_checkpoint` and `data_df` are not defined anywhere
# in this file — presumably they come from an earlier notebook cell or an
# unseen part of the script; confirm they are in scope before running.
tokenizer = AutoTokenizer.from_pretrained(tokenizer_checkpoint)
collate_fn = Collator(tokenizer, 128)  # dynamic padding, capped at 128 tokens

# First 800k rows for training, the remainder for testing.
# NOTE(review): this is a positional split with no shuffling — verify the
# dataframe ordering is not biased before relying on this split.
train_dataset = PreTrainDataset(data_df[:800000]['text'].tolist(), tokenizer)
test_dataset = PreTrainDataset(data_df[800000:]['text'].tolist(), tokenizer)

train_dataloader = DataLoader(train_dataset, batch_size=4, collate_fn=collate_fn)
test_dataloader = DataLoader(test_dataset, batch_size=4, collate_fn=collate_fn)