from datasets import load_dataset
from transformers import AutoTokenizer
from torch.utils.data import DataLoader




def get_data(tokenizer, datasets_path: str = None, train_batch_size: int = 16, eval_batch_size: int = 16):
    """
    Load the IMDB dataset, tokenize it, and build PyTorch dataloaders.

    :param tokenizer: tokenizer of the chosen pretrained model
    :param datasets_path: local dataset path; ``None`` (default) downloads from the HF Hub
    :param train_batch_size: batch size of the training dataloader (default 16)
    :param eval_batch_size: batch size of the evaluation dataloader (default 16)

    :returns train_dataloader: training dataloader (shuffled)
    :returns eval_dataloader: evaluation dataloader
    :raises ValueError: if the online download fails (original error is chained)
    """

    if datasets_path is None:
        # Download the dataset; the canonical Hub repo id is lowercase "imdb".
        try:
            datasets = load_dataset("imdb", cache_dir="./datasets")
        except Exception as e:
            # Chain the original exception so the real download failure
            # (network error, missing repo, ...) stays visible in the traceback.
            raise ValueError("在线下载数据集失败, 请指定本地数据集路径") from e
    else:
        datasets = load_dataset(datasets_path)

    def tokenize_function(examples):
        # max_length must be given a concrete value for fixed-size padding.
        return tokenizer(examples["text"], padding="max_length", truncation=True, max_length=512)

    tokenized_datasets = datasets.map(tokenize_function, batched=True)

    # Drop the raw "text" column: the model does not accept raw text as input.
    tokenized_datasets = tokenized_datasets.remove_columns(["text"])
    # Rename "label" to "labels": the model expects the argument name "labels".
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")
    # Return PyTorch tensors from the dataset.
    tokenized_datasets.set_format("torch")

    train_dataloader = DataLoader(tokenized_datasets['train'], shuffle=True, batch_size=train_batch_size)
    eval_dataloader = DataLoader(tokenized_datasets['test'], batch_size=eval_batch_size)

    return train_dataloader, eval_dataloader

if __name__ == '__main__':
    # Smoke test: load a local tokenizer, build the dataloaders, and
    # print a single training batch to verify the pipeline end to end.
    tokenizer = AutoTokenizer.from_pretrained("D:/hugging_face/models/bert-base-uncased")

    # Example with a local dataset instead of downloading:
    # train_dataloader, eval_dataloader = get_data(tokenizer, datasets_path="D:/datasets/imdb")  
    train_dataloader, eval_dataloader = get_data(tokenizer)  

    first_batch = next(iter(train_dataloader))
    print(first_batch)
    