benjamin-paine
committed
Upload convert.py
convert.py
ADDED
@@ -0,0 +1,461 @@
# Converts Amazon's DiPCo dataset to parquet format in an utterance-wise manner
# Author: Benjamin Paine <painebenjamin@gmail.com>
from __future__ import annotations

import os
import gc
import json
import torch
import torchaudio

from time import perf_counter
from datasets import Dataset, Features, Audio, Value, ClassLabel
from typing import Sequence, Dict, Any, Iterable, Iterator, Optional, TYPE_CHECKING

if TYPE_CHECKING:
    # Guard this import for type-checking only
    from typing_extensions import Literal

class RotatingAudioDirectoryData:
    """
    A helper class for reading data just-in-time from a directory, and maintaining
    a least-recently-used cache of data in memory.

    Also maintains a loose memory size, evicting least-recently-read data when necessary.
    Accessing data will opaquely read from disk when needed, with each access resetting
    the item's place in the eviction queue.
    """
    data: Dict[str, torch.Tensor]
    data_sizes: Dict[str, int]
    access_times: Dict[str, float]
    sample_rate: int
    eviction_rate: float

    def __init__(self, directory: str, max_size: int, eviction_rate: float=0.25) -> None:
        """
        :param directory: The directory to read data from.
        :param max_size: The maximum size of data to keep in memory.
        :param eviction_rate: The fraction of data to evict when full.
        """
        self.directory = directory
        self.max_size = max_size
        self.file_names = [
            f for f in os.listdir(directory)
            if f.endswith(".wav")
            and not f.startswith(".")
        ]
        self.data = {}
        self.data_sizes = {}
        self.access_times = {}
        self.eviction_rate = eviction_rate
        self.sample_rate = 0
        self.read(self.file_names[0]) # Get sample rate

    @property
    def size(self) -> int:
        """
        :return: The total size of data in memory.
        """
        return sum(self.data_sizes.values())

    def evict(self) -> None:
        """
        Evicts the least-recently accessed items from memory.

        This is made to be called infrequently, so it will evict up to the
        configured eviction rate (default 25%) of the total data.
        """
        num_to_evict = int(self.eviction_rate * len(self.data))
        evict_keys = sorted(self.access_times, key=self.access_times.get)[:num_to_evict]
        for key in evict_keys:
            del self.data[key]
            del self.data_sizes[key]
            del self.access_times[key]
        gc.collect()

    def check_evict(self, size: int) -> None:
        """
        Checks if the new data will fit, and evicts if necessary.

        :param size: The size of the new data to add.
        """
        if self.size + size > self.max_size:
            self.evict()

    def read(self, file_name: str) -> None:
        """
        Reads a file from disk and stores it in memory.

        :param file_name: The name of the file to read.
        """
        file_path = os.path.join(self.directory, file_name)
        file_size = os.path.getsize(file_path) * 2 # 16-bit audio on disk is loaded as 32-bit floats
        self.check_evict(file_size)

        try:
            data, sample_rate = torchaudio.load(file_path)
        except RuntimeError as e:
            raise RuntimeError(f"Error reading file {file_path}: {e}")

        if self.sample_rate == 0:
            self.sample_rate = sample_rate

        assert self.sample_rate == sample_rate, "Unexpected sample rate mismatch"

        self.data[file_name] = data[0] # Mono audio
        self.data_sizes[file_name] = file_size
        self.access_times[file_name] = perf_counter()

    def __getitem__(self, key: str) -> torch.Tensor:
        """
        Gets an item from the data, reading it from disk if necessary.

        :param key: The key of the item to get.
        :return: The data corresponding to the key.
        """
        if key not in self.data:
            if key not in self.file_names:
                raise KeyError(f"File {key} not found in directory")
            self.read(key)
        self.access_times[key] = perf_counter()
        return self.data[key]

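# Illustrative usage sketch for RotatingAudioDirectoryData (not executed by this script;
# the directory and file name below are assumptions based on the DiPCo layout used later):
#
#   cache = RotatingAudioDirectoryData("./Dipco/audio/dev", max_size=1 << 30)  # ~1GB cache
#   waveform = cache["S02_U01.CH1.wav"]  # read from disk on first access, then cached
#   print(cache.sample_rate, waveform.shape)
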
def maybe_use_tqdm(
    iterable: Iterable[Any],
    desc: Optional[str]=None
) -> Iterator[Any]:
    """
    Uses tqdm if available, otherwise iterates as-is.

    :param iterable: The iterable to iterate over.
    :param desc: The description to show in the progress bar.
    :return: The iterator over the iterable.
    """
    try:
        import tqdm
        yield from tqdm.tqdm(iterable, desc=desc)
    except ImportError:
        yield from iterable

def mix_audio(audio_to_mix: Sequence[torch.Tensor]) -> torch.Tensor:
    """
    Mixes multiple audio arrays together by averaging them sample-wise.
    """
    mixed_audio = torch.stack(audio_to_mix)
    return torch.mean(mixed_audio, dim=0)

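# Illustrative example of mix_audio (not executed by this script):
#
#   >>> mix_audio([torch.tensor([1.0, 2.0]), torch.tensor([3.0, 4.0])])
#   tensor([2., 3.])
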
def get_seconds_from_timestamp(timestamp: str) -> float:
    """
    Converts a timestamp string to seconds.
    Expects a timestamp of format `hh:mm:ss.ff`

    :param timestamp: The timestamp string to convert.
    :return: The number of seconds represented by the timestamp.
    """
    parts = timestamp.split(":")
    hours = int(parts[0])
    minutes = int(parts[1])
    seconds = float(parts[2])
    return hours * 3600 + minutes * 60 + seconds

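# Illustrative example of get_seconds_from_timestamp (not executed by this script):
#
#   >>> get_seconds_from_timestamp("01:02:03.50")
#   3723.5  # 1*3600 + 2*60 + 3.5
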
def process_session_file(
    session_file: str,
    wav_data: RotatingAudioDirectoryData,
    channel_mode: Literal["split", "mixed"] = "split",
) -> Iterable[Dict[str, Any]]:
    """
    Processes a single session file.

    :param session_file: The path to the session file to process.
    :param wav_data: The audio data to use for the session.
    :param channel_mode: The channel mode to use for processing.
    :return: An iterator over the processed utterances.
    """
    with open(session_file, "r") as f:
        session_data = json.load(f)

    # Static device distance data from the DiPCo paper (https://arxiv.org/abs/1909.13447)
    participant_position_device_distance_map = [
        [1600, 2240, 3825, 2900, 1760],
        [1990, 2130, 3950, 3100, 1760],
        [1820, 1520, 2900, 2030, 2790],
        [1300, 1120, 3100, 2520, 2820]
    ]
    # Static map of session to ordered participants
    participant_session_positions = [
        [ 0, 1, 2, 3],
        [ 4, 5, 6, 7],
        [ 8, 9, 10, 11],
        [12, 13, 14, 15],
        [16, 17, 18, 19],
        [20, 21, 22, 23],
        [20, 21, 22, 23],
        [24, 25, 26, 27],
        [28, 29, 30, 31],
        [28, 29, 30, 31]
    ]

    for utterance in maybe_use_tqdm(session_data, desc="Processing utterances"):
        # Grab data from dict
        start_times = utterance["start_time"]
        end_times = utterance["end_time"]
        words = utterance["words"]
        gender = utterance["gender"]
        nativeness = utterance["nativeness"]
        mother_tongue = utterance["mother_tongue"]

        session_id_label = utterance["session_id"]
        session_id = int(session_id_label[1:]) - 1

        participant_id_label = utterance["speaker_id"]
        participant_id = int(participant_id_label[1:]) - 1

        # Get the participant's position index and device distances
        participant_position_index = participant_session_positions[session_id].index(participant_id) # 0 - 3
        participant_device_distances = participant_position_device_distance_map[participant_position_index] # 5 distances

        speaker_metadata = {
            "transcription": words,
            "participant_id": participant_id_label,
            "session_id": session_id_label,
            "gender": gender,
            "nativeness": nativeness,
            "mother_tongue": mother_tongue,
        }

        # Each key is either "close-talk" or U01, U02, etc.
        # Go through each and get the timestamp into the corresponding wav file
        for time_key in start_times.keys():
            # Get the start time in seconds and frames
            start_timestamp = start_times[time_key]
            start_time_s = get_seconds_from_timestamp(start_timestamp)
            start_frame = int(start_time_s * wav_data.sample_rate)
            # Get the end time in seconds and frames
            end_timestamp = end_times[time_key]
            end_time_s = get_seconds_from_timestamp(end_timestamp)
            end_frame = int(end_time_s * wav_data.sample_rate)
            # Create a metadata dict for the utterance to use later
            utterance_metadata = {
                **speaker_metadata,
                **{
                    "start_timestamp": start_timestamp,
                    "start_time_s": start_time_s,
                    "start_frame": start_frame,
                    "end_timestamp": end_timestamp,
                    "end_time_s": end_time_s,
                    "end_frame": end_frame,
                    "duration_s": end_time_s - start_time_s,
                    "duration_frames": end_frame - start_frame,
                },
            }

            if time_key == "close-talk":
                # Use the participant's close-talk audio
                device_metadata = {
                    "device_type": "close-talk",
                    "device_id": participant_id_label,
                    "device_channel": 0,
                    "device_distance_mm": 0,
                }
                wav_file_name = f"{session_id_label}_{participant_id_label}.wav"
                audio_array = wav_data[wav_file_name][start_frame:end_frame]
                yield {
                    "audio": {
                        "array": audio_array,
                        "path": wav_file_name,
                        "sampling_rate": wav_data.sample_rate,
                    },
                    **device_metadata,
                    **utterance_metadata,
                }
            else:
                # Time key will be U01, U02, etc.
                device_id = int(time_key[1:]) - 1
                # Use the far-field audio array of 7 microphones
                device_metadata = {
                    "device_type": "far-field",
                    "device_id": time_key,
                    "device_distance_mm": participant_device_distances[device_id],
                }
                audio_to_mix = []
                # Iterate over each channel
                for channel in range(7):
                    wav_file_name = f"{session_id_label}_{time_key}.CH{channel+1}.wav"
                    audio_array = wav_data[wav_file_name][start_frame:end_frame]

                    if channel_mode == "split":
                        # If using split mode, yield each channel separately
                        yield {
                            "audio": {
                                "array": audio_array,
                                "path": wav_file_name,
                                "sampling_rate": wav_data.sample_rate,
                            },
                            "device_channel": channel+1,
                            **device_metadata,
                            **utterance_metadata,
                        }
                    else:
                        # Otherwise, add to the mix
                        audio_to_mix.append(audio_array)

                if channel_mode == "mixed":
                    # If using mixed mode, mix the audio channels together
                    audio_array = mix_audio(audio_to_mix)
                    yield {
                        "audio": {
                            "array": audio_array,
                            "sampling_rate": wav_data.sample_rate,
                        },
                        "device_channel": 0,
                        **device_metadata,
                        **utterance_metadata,
                    }

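# Illustrative sketch of consuming process_session_file directly (not executed by this
# script; the session file path is an assumption based on the DiPCo layout used below):
#
#   audio_cache = RotatingAudioDirectoryData("./Dipco/audio/dev", max_size=1 << 30)
#   for record in process_session_file("./Dipco/transcriptions/dev/S02.json", audio_cache, channel_mode="mixed"):
#       # Each record holds "audio" plus device metadata (device_type, device_id,
#       # device_channel, device_distance_mm) and utterance/speaker metadata
#       # (transcription, timings, gender, nativeness, mother_tongue).
#       print(record["device_id"], record["transcription"])
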
def process_split(
    dipco_path: str,
    dipco_split: str,
    channel_mode: Literal["split", "mixed"] = "split",
    max_memory_bytes: int = 16*1024**3, # 16GB
) -> Iterable[Dict[str, Any]]:
    """
    Processes a split of the DiPCo dataset, iterating through all session files.

    :param dipco_path: The path to the DiPCo dataset.
    :param dipco_split: The split of the DiPCo dataset to process.
    :param channel_mode: The channel mode to use for processing.
    :param max_memory_bytes: The maximum memory to use for audio data.
    :return: An iterator over the processed utterances.
    :see: process_session_file
    """
    dipco_path = os.path.abspath(dipco_path)
    wav_dir = os.path.join(dipco_path, "audio", dipco_split)
    wav_data = RotatingAudioDirectoryData(wav_dir, max_size=max_memory_bytes)
    transcriptions_dir = os.path.join(dipco_path, "transcriptions", dipco_split)
    session_filenames = [
        f for f in os.listdir(transcriptions_dir)
        if f.endswith(".json")
        and not f.startswith(".")
    ]

    for session_filename in maybe_use_tqdm(session_filenames, desc="Processing session data"):
        num_yielded = 0
        for utterance in process_session_file(
            os.path.join(transcriptions_dir, session_filename),
            wav_data,
            channel_mode=channel_mode,
        ):
            num_yielded += 1
            yield utterance

        print(f"Parsed {num_yielded} utterances from {session_filename}")

    del wav_data
    gc.collect()

def get_split_dataset(
    dipco_path: str,
    dipco_split: str,
    channel_mode: Literal["split", "mixed"] = "split",
) -> Dataset:
    """
    Gets a split of the DiPCo dataset as a Dataset object.

    :param dipco_path: The path to the DiPCo dataset.
    :param dipco_split: The split of the DiPCo dataset to process.
    :param channel_mode: The channel mode to use for processing.
    :return: The processed dataset.
    :see: process_split
    """
    gen_kwargs = {
        "dipco_path": dipco_path,
        "dipco_split": dipco_split,
        "channel_mode": channel_mode,
    }

    return Dataset.from_generator(
        process_split,
        gen_kwargs=gen_kwargs,
        features=Features({
            "audio": Audio(),
            "start_timestamp": Value(dtype="string"),
            "start_time_s": Value(dtype="float32"),
            "start_frame": Value(dtype="uint64"),
            "end_timestamp": Value(dtype="string"),
            "end_time_s": Value(dtype="float32"),
            "end_frame": Value(dtype="uint64"),
            "duration_s": Value(dtype="float32"),
            "duration_frames": Value(dtype="uint64"),
            "transcription": Value(dtype="string"),
            "mother_tongue": Value(dtype="string"),
            "participant_id": Value(dtype="string"),
            "session_id": Value(dtype="string"),
            "device_id": Value(dtype="string"),
            "device_channel": Value(dtype="uint8"),
            "device_distance_mm": Value(dtype="uint16"),
            "device_type": ClassLabel(
                num_classes=2,
                names=["close-talk", "far-field"]
            ),
            "gender": ClassLabel(
                num_classes=2,
                names=["female", "male"]
            ),
            "nativeness": ClassLabel(
                num_classes=2,
                names=["native", "non-native"]
            ),
        })
    )

def synchronize_split(
    dipco_path: str,
    dipco_split: str,
    hub_path: str,
    hub_split: str,
    channel_mode: Literal["split", "mixed"] = "split",
    set_default: bool = False,
) -> None:
    """
    Synchronizes a split of the DiPCo dataset to the hub.

    :param dipco_path: The path to the DiPCo dataset.
    :param dipco_split: The split of the DiPCo dataset to process.
    :param hub_path: The path to the hub dataset.
    :param hub_split: The split of the hub dataset to push to.
    :param channel_mode: The channel mode to use for processing.
    :param set_default: Whether to set the split as the default.
    :see: get_split_dataset
    """
    dataset = get_split_dataset(
        dipco_path=dipco_path,
        dipco_split=dipco_split,
        channel_mode=channel_mode,
    )
    dataset.push_to_hub(
        hub_path,
        config_name=f"{channel_mode}-channel",
        split=hub_split,
        set_default=set_default,
    )

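# After synchronize_split pushes a config, it can be loaded back with the standard
# datasets API (illustrative; repository and config names follow the settings below):
#
#   from datasets import load_dataset
#   ds = load_dataset("benjamin-paine/dinner-party-corpus", "split-channel", split="train")
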
# Helper classes and methods done, this is the main process when running the script
if __name__ == "__main__":
    # If you want to run this script, you can configure the following variables
    # to match your device and repository settings.
    dipco_path = "./Dipco" # Extracted DiPCo dataset
    hub_path = "benjamin-paine/dinner-party-corpus" # username/repo on huggingface.co
    channel_modes = ["split", "mixed"] # channel modes to process, the first becomes the default
    split_maps = [("dev", "train"), ("eval", "test")] # Map DiPCo splits to hub splits

    # Done configuring, now run the conversion
    for i, channel_mode in enumerate(channel_modes):
        for j, (dipco_split, hub_split) in enumerate(split_maps):
            synchronize_split(
                dipco_path=dipco_path,
                dipco_split=dipco_split,
                hub_path=hub_path,
                hub_split=hub_split,
                channel_mode=channel_mode,
                set_default=i==0 and j==0,
            )