# app.py — awacke1 (commit c4cc6f7, 1.57 kB)
# Demo: saving/reloading a Hugging Face dataset in Arrow, CSV, JSONL, and Parquet formats.
from datasets import load_dataset

# Download the Allocine French movie-review dataset (train/validation/test splits).
raw_datasets = load_dataset("allocine")

# Persist every split to a local directory in Arrow format.
raw_datasets.save_to_disk("my-arrow-datasets")

from datasets import load_from_disk

# Reload the on-disk Arrow copy to confirm the save/load round trip works.
arrow_datasets_reloaded = load_from_disk("my-arrow-datasets")
# Export each split to a CSV file.
# NOTE(review): index=None is falsy, so pandas should omit the row-index
# column (same effect as index=False) — confirm against Dataset.to_csv docs.
for split, dataset in raw_datasets.items():
    dataset.to_csv(f"my-dataset-{split}.csv", index=None)

# Map split names to the CSV files just written, then reload them
# through the generic "csv" loading script.
data_files = {
    "train": "my-dataset-train.csv",
    "validation": "my-dataset-validation.csv",
    "test": "my-dataset-test.csv",
}
csv_datasets_reloaded = load_dataset("csv", data_files=data_files)
# Export each split to JSON Lines and Parquet in a single pass over the splits
# (the original iterated raw_datasets.items() twice; the files produced are identical).
for split, dataset in raw_datasets.items():
    dataset.to_json(f"my-dataset-{split}.jsonl")
    dataset.to_parquet(f"my-dataset-{split}.parquet")

# Derive both split -> filename mappings from one tuple of split names
# instead of two hand-maintained dict literals.
splits = ("train", "validation", "test")
json_data_files = {s: f"my-dataset-{s}.jsonl" for s in splits}
parquet_data_files = {s: f"my-dataset-{s}.parquet" for s in splits}

# Reload through the generic "json" and "parquet" loading scripts.
json_datasets_reloaded = load_dataset("json", data_files=json_data_files)
parquet_datasets_reloaded = load_dataset("parquet", data_files=parquet_data_files)