init

tokenize_dataset_s2s.py  CHANGED  +3 -6
@@ -19,10 +19,10 @@ tokenizer = EncodecTokenizer.from_pretrained()
 
 def tokenize(example):
     for side in sides:
-        wav = torch.as_tensor(example[f"{side}.audio"]["array"].reshape(1, 1, -1), dtype=torch.
+        wav = torch.as_tensor(example[f"{side}.audio"]["array"].reshape(1, 1, -1), dtype=torch.float32)
         example[f"{side}.audio.tokens"] = tokenizer.wav_to_tokens(
             wav=wav, sample_rate=example[f"{side}.audio"]["sampling_rate"]
-        ).numpy().tolist()
+        ).numpy().tolist()[0]
     return example
 
 
@@ -32,7 +32,4 @@ dataset = dataset.map(
     num_proc=num_proc,
     desc="tokenize dataset"
 )
-DatasetDict({"train": dataset}).push_to_hub(
-    f"{hf_org}/{hf_dataset}.tokenized",
-    config_name=f"subset_{dataset_id}"
-)
+DatasetDict({"train": dataset}).push_to_hub(f"{hf_org}/{hf_dataset}.tokenized", config_name=f"subset_{dataset_id}")
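For context, a minimal sketch of how the changed function sits in the full script. The org/dataset names, the `sides` tuple, the dataset-loading step, and the module providing `EncodecTokenizer` are assumptions; only the calls visible in the diff (`from_pretrained`, `wav_to_tokens`, `map`, `push_to_hub`) are taken from it.

import torch
from datasets import load_dataset, DatasetDict

from encodec_tokenizer import EncodecTokenizer  # assumed project-local module

hf_org = "my-org"              # placeholder
hf_dataset = "my-s2s-dataset"  # placeholder
dataset_id = 0                 # placeholder subset index
sides = ("user", "agent")      # assumed; the diff only shows `sides`
num_proc = 8

tokenizer = EncodecTokenizer.from_pretrained()
dataset = load_dataset(f"{hf_org}/{hf_dataset}", split="train")  # assumed loading step

def tokenize(example):
    for side in sides:
        # Cast the raw waveform to float32 and reshape to (batch=1, channels=1, n_samples).
        wav = torch.as_tensor(
            example[f"{side}.audio"]["array"].reshape(1, 1, -1), dtype=torch.float32
        )
        # wav_to_tokens returns a batched tensor; [0] drops the batch dimension so
        # each example stores the token matrix itself (this is what the commit changes).
        example[f"{side}.audio.tokens"] = tokenizer.wav_to_tokens(
            wav=wav, sample_rate=example[f"{side}.audio"]["sampling_rate"]
        ).numpy().tolist()[0]
    return example

dataset = dataset.map(tokenize, num_proc=num_proc, desc="tokenize dataset")
DatasetDict({"train": dataset}).push_to_hub(
    f"{hf_org}/{hf_dataset}.tokenized", config_name=f"subset_{dataset_id}"
)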