init
tokenize_dataset_s2s.py CHANGED (+5 -1)
@@ -1,7 +1,8 @@
 import os
+from os.path import expanduser
 
+import shutil
 import torch
-import numpy as np
 from datasets import load_dataset, DatasetDict
 from encodec_audio_tokenizer import EncodecTokenizer
 
@@ -33,3 +34,6 @@ dataset = dataset.map(
     desc="tokenize dataset"
 )
 DatasetDict({"train": dataset}).push_to_hub(f"{hf_org}/{hf_dataset}.tokenized", config_name=f"subset_{dataset_id}")
+cache_dir = f"{expanduser('~')}/.cache/huggingface/datasets/{hf_org}___{hf_dataset}/subset_{dataset_id}"
+if os.path.exists(cache_dir):
+    shutil.rmtree(cache_dir)