from datasets import load_dataset

def get_train_eval_data(dataset_args, formator_fn, tokenizer):
    """Load and tokenize the train and eval parquet datasets.

    Args:
        dataset_args: dict containing 'train_dataset_path' and
            'eval_dataset_path' (comma-separated parquet file paths) and
            'num_data_proc' (worker count for ``Dataset.map``). Every key is
            also forwarded to ``formator_fn`` through ``fn_kwargs``.
        formator_fn: batched mapping function applied to both splits; it
            receives ``tokenizer`` plus all of ``dataset_args`` as kwargs.
        tokenizer: tokenizer forwarded to ``formator_fn``.

    Returns:
        Tuple ``(train_dataset, eval_dataset)`` of mapped datasets.
    """

    def _load_and_format(path_spec):
        # Support comma-separated multi-file specs for BOTH splits (the
        # original only split the train path; a single path splits into a
        # one-element list, so this is backward compatible for eval).
        dataset = load_dataset(
            'parquet', data_files={'train': path_spec.split(',')}
        )['train']
        return dataset.map(
            formator_fn,
            batched=True,
            num_proc=dataset_args['num_data_proc'],
            fn_kwargs={
                "tokenizer": tokenizer,
                **dataset_args,
            },
        )

    train_dataset = _load_and_format(dataset_args['train_dataset_path'])
    eval_dataset = _load_and_format(dataset_args['eval_dataset_path'])

    return train_dataset, eval_dataset