# vlm/data_generator.py
import argparse
import os

from datasets import load_dataset


def save_dataset_to_parquet(dataset, output_path, dataset_name):
    """Write each split of `dataset` to `<output_path>/<dataset_name>/<split>.parquet`."""
    os.makedirs(output_path, exist_ok=True)
    for split in dataset:
        split_dataset = dataset[split]
        # Save the entire split as a single Parquet file.
        split_path = os.path.join(output_path, dataset_name, f"{split}.parquet")
        os.makedirs(os.path.dirname(split_path), exist_ok=True)  # Create parent directories if needed.
        split_dataset.to_parquet(split_path)
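
# A minimal sketch of reading a saved split back, assuming the "train" split of
# speech_vqav2 has already been exported by this script (paths are examples):
#
#   from datasets import load_dataset
#   reloaded = load_dataset(
#       "parquet",
#       data_files="./hf_datasets/speech_vqav2/train.parquet",
#   )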


def main(dataset_name):
    num_proc = os.cpu_count()
    # Map each dataset name to the local Hugging Face builder script that generates it.
    dataset_paths = {
        "speech_vqav2": "./hf_generator_vqav2.py",
        "speech_vg": "./hf_generator_vg.py",
        "speech_asr": "./hf_generator_asr.py",
        "speech_tts": "./hf_generator_tts.py",
        "laion": "./hf_generator_laion.py",
    }
    if dataset_name not in dataset_paths:
        raise ValueError(
            f"Dataset name '{dataset_name}' not recognized. "
            f"Available options: {list(dataset_paths.keys())}"
        )
    ds = load_dataset(
        dataset_paths[dataset_name],
        trust_remote_code=True,
        cache_dir=f"~/.cache/{dataset_name}",
        num_proc=num_proc,
    )
    save_dataset_to_parquet(ds, "./hf_datasets/", dataset_name)


if __name__ == "__main__":
    parser = argparse.ArgumentParser(
        description="Save a specified Hugging Face dataset to Parquet format."
    )
    parser.add_argument(
        "dataset_name",
        type=str,
        help="Name of the dataset to load and save to Parquet. "
        "Options: speech_vqav2, speech_vg, speech_asr, speech_tts, laion",
    )
    args = parser.parse_args()
    main(args.dataset_name)
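
# Example usage (assumes the hf_generator_*.py builder scripts live alongside
# this file):
#
#   python data_generator.py speech_vqav2
#
# Each split is then written to ./hf_datasets/speech_vqav2/<split>.parquet.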