from datasets import load_from_disk
from torch.utils.data import DataLoader
from config import PROCESSED_DATA_DIR, BATCH_SIZE


def get_dataloader(train=True):
    """Build a DataLoader over the preprocessed dataset saved on disk.

    Args:
        train: If True, load the 'train' split; otherwise load the 'test'
            split. Defaults to True.

    Returns:
        A ``torch.utils.data.DataLoader`` yielding batches of torch tensors
        (the dataset's format is set to 'torch' before wrapping).

    Note:
        Only the training loader is shuffled; the test loader keeps a
        deterministic order so evaluation is reproducible.
    """
    data_path = str(PROCESSED_DATA_DIR / ('train' if train else 'test'))
    dataset = load_from_disk(data_path)
    # Return torch tensors instead of plain Python lists from __getitem__.
    dataset.set_format(type='torch')
    # Bug fix: previously shuffle=True unconditionally, which also shuffled
    # the test split; shuffle only when loading training data.
    return DataLoader(dataset, batch_size=BATCH_SIZE, shuffle=train)


if __name__ == '__main__':
    # Quick smoke test: report batch counts for both splits, then inspect
    # the tensor shapes of a single training batch.
    loader_train = get_dataloader()
    print(len(loader_train))  # 785

    loader_test = get_dataloader(train=False)
    print(len(loader_test))  # 197

    sample = next(iter(loader_train))
    print(sample['input_ids'].shape)       # [batch_size, seq_len]
    print(sample['attention_mask'].shape)  # [batch_size, seq_len]
    print(sample['label'].shape)           # [batch_size]
