import pickle
from data_handle.preprocess_dataset import MyDataset
from torch.utils.data import DataLoader
import torch.nn.utils.rnn as rnn_utils


def _load_dataset(path, max_len):
    """Unpickle a list of samples from *path* and wrap it in a MyDataset.

    NOTE(security): pickle.load executes arbitrary code from the file —
    only load dataset files from a trusted source.
    """
    with open(path, 'rb') as f:
        input_list = pickle.load(f)
    return MyDataset(input_list, max_len=max_len)


def get_dataset(train_path, test_path, max_len=300):
    """Build the train and test datasets from two pickled sample files.

    Args:
        train_path: path to the pickled training samples.
        test_path: path to the pickled test/validation samples.
        max_len: maximum sequence length passed through to MyDataset
            (default 300, matching the original hard-coded value).

    Returns:
        (train_dataset, test_dataset) tuple of MyDataset instances.
    """
    train_dataset = _load_dataset(train_path, max_len)
    test_dataset = _load_dataset(test_path, max_len)
    return train_dataset, test_dataset


def collate_fn(data):
    """Collate a batch of variable-length 1-D tensors for causal LM training.

    Args:
        data: list of 1-D LongTensors (token id sequences of varying length).

    Returns:
        (input_ids, labels) — two (batch, max_len) tensors padded to the
        longest sequence in the batch. input_ids pads with 0; labels pads
        the same sequences with -100 so the loss ignores padded positions.
    """
    # Pad inputs with 0 up to the longest sequence in this batch.
    input_ids = rnn_utils.pad_sequence(sequences=data, batch_first=True, padding_value=0)
    # Pad labels with -100 so padded positions are masked out of the loss.
    labels = rnn_utils.pad_sequence(sequences=data, batch_first=True, padding_value=-100)
    return input_ids, labels


def get_dataloader(train_path, test_path, batch_size=4):
    """Build shuffling DataLoaders over the train and test datasets.

    Args:
        train_path: path to the pickled training samples.
        test_path: path to the pickled test/validation samples.
        batch_size: samples per batch (default 4); incomplete final
            batches are dropped.

    Returns:
        (train_dataloader, test_dataloader) tuple.
    """
    train_dataset, test_dataset = get_dataset(train_path, test_path)
    # Both loaders share the same configuration; only the dataset differs.
    loader_kwargs = dict(
        batch_size=batch_size,
        shuffle=True,
        drop_last=True,
        collate_fn=collate_fn,
    )
    train_dataloader = DataLoader(dataset=train_dataset, **loader_kwargs)
    test_dataloader = DataLoader(dataset=test_dataset, **loader_kwargs)
    return train_dataloader, test_dataloader


if __name__ == '__main__':
    # Smoke test: build both loaders and print the shape of one batch each.
    train_dataset_path = '../data/medical_train.pkl'
    test_dataset_path = '../data/medical_valid.pkl'
    train_dataloader, test_dataloader = get_dataloader(
        train_path=train_dataset_path, test_path=test_dataset_path
    )
    for loader in (train_dataloader, test_dataloader):
        input_ids, labels = next(iter(loader))
        print(0)  # batch index (only the first batch is inspected)
        print(input_ids.shape)
        print(labels.shape)
