import pickle

import torch
from torch.utils.data import Dataset, DataLoader
import torch.nn.utils.rnn as rnn_utils
from .gpt_config import gpt_logger as logger

class GPTDataSet(Dataset):
    """Dataset of pre-tokenized sequences, truncated to a maximum length.

    Args:
        input_list: list of token-id lists, one per training example
            (shape ``(b, seq_len_dy)`` with dynamic sequence lengths).
        max_len: maximum number of tokens kept per example.
    """

    def __init__(self, input_list, max_len):
        self.input_list = input_list
        self.max_len = max_len

    def __len__(self):
        return len(self.input_list)

    def __getitem__(self, index):
        # Truncate to max_len and hand back a long tensor of token ids.
        tokens = self.input_list[index][:self.max_len]
        return torch.tensor(tokens, dtype=torch.long)


def get_dataset(args):
    """Load the pickled training list and wrap it in a :class:`GPTDataSet`.

    Args:
        args: namespace with ``save_train_list_file_path`` (path to a
            pickled list of token-id lists) and ``max_input_size``
            (max tokens kept per example).

    Returns:
        GPTDataSet whose items are ``torch.long`` tensors truncated to
        ``args.max_input_size``.
    """
    logger.info("loading training dataset")
    # NOTE(review): pickle.load is unsafe on untrusted files — only load
    # training lists produced by this project.
    # Context manager closes the file deterministically (the original
    # `pickle.load(open(...))` leaked the handle until GC).
    with open(args.save_train_list_file_path, "rb") as f:
        train_list = pickle.load(f)
    return GPTDataSet(train_list, args.max_input_size)


def padding_batch_data(batch):
    """Collate variable-length sequences into two equal-length batch tensors.

    Every sequence is padded to the longest one in the batch: input_ids
    with value 5 (presumably the pad token id — confirm against the
    tokenizer) and labels with -100 (the value ignored by PyTorch's
    cross-entropy loss).

    Args:
        batch: list of 1-D ``torch.long`` tensors of token ids.

    Returns:
        Tuple ``(input_ids, labels)``, each of shape ``(batch, max_len)``.
    """
    pad = rnn_utils.pad_sequence
    input_ids = pad(batch, batch_first=True, padding_value=5)
    labels = pad(batch, batch_first=True, padding_value=-100)
    return input_ids, labels


def get_loader(args):
    """Build a shuffled training DataLoader over the pickled dataset.

    Variable-length sequences are padded per batch by
    :func:`padding_batch_data` (input_ids with 5, labels with -100).

    Args:
        args: namespace with ``save_train_list_file_path``,
            ``max_input_size``, ``batch_size`` and ``num_workers``.

    Returns:
        DataLoader yielding ``(input_ids, labels)`` tensor pairs; partial
        trailing batches are dropped (``drop_last=True``).
    """
    # Delegate to get_dataset instead of duplicating its loading code; it
    # also logs and closes the pickle file properly.
    dataset = get_dataset(args)
    return DataLoader(
        dataset,
        batch_size=args.batch_size,
        shuffle=True,
        num_workers=args.num_workers,
        collate_fn=padding_batch_data,
        drop_last=True,
    )
