Datasets:
Create preprocess.py
Browse files- data/preprocess.py +39 -0
data/preprocess.py
ADDED
@@ -0,0 +1,39 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import csv
|
2 |
+
import os
|
3 |
+
import tarfile
|
4 |
+
from hashlib import sha1
|
5 |
+
|
6 |
+
|
7 |
+
def hexdigest(string):
    """Return the hex-encoded SHA-1 digest of *string* (UTF-8 encoded)."""
    hasher = sha1(string.encode())
    return hasher.hexdigest()
|
9 |
+
|
10 |
+
|
11 |
+
def read_fairseq(root, subset):
    """Read a fairseq manifest pair (train.tsv / train.wrd) for *subset*.

    Returns a list of ``[audio_path, sha1_of_transcription, transcription]``
    rows, one per sample.

    The first line of a fairseq ``.tsv`` manifest is the audio root directory,
    not a sample, while the ``.wrd`` file has exactly one transcription per
    sample and no header. The header therefore has to be skipped *before*
    pairing the two files; zipping first and dropping row 0 afterwards (as the
    previous version did) leaves every transcription shifted by one relative
    to its audio path.
    """
    with open(f"{root}/{subset}/train.tsv", 'r') as tsv, open(f"{root}/{subset}/train.wrd", 'r') as wrd:
        next(tsv, None)  # skip the manifest header so rows align with .wrd lines
        audio = (row[0] for row in csv.reader(tsv, delimiter='\t'))
        transcriptions = (line.strip() for line in wrd)
        return [[path, hexdigest(transcription), transcription]
                for path, transcription in zip(audio, transcriptions)]
|
18 |
+
|
19 |
+
|
20 |
+
def shard_wav_files(file_paths, shard_size_limit, output_dir, arcnames=None):
    """Pack *file_paths* into sequentially numbered tar shards in *output_dir*.

    A new shard (``shard_1.tar``, ``shard_2.tar``, ...) is started whenever
    adding the next file would push the current shard past *shard_size_limit*
    bytes (sizes measured uncompressed, via ``os.path.getsize``). A single
    file larger than the limit still goes into a shard of its own.

    Args:
        file_paths: sequence of paths to archive, in order.
        shard_size_limit: soft per-shard size cap in bytes.
        output_dir: existing directory that receives the shard archives.
        arcnames: optional sequence parallel to *file_paths* giving the archive
            member names; defaults to each file's basename.
    """
    shard_index = 1
    current_shard_size = 0
    current_tar = None

    try:
        for i, file_path in enumerate(file_paths):
            file_size = os.path.getsize(file_path)

            # Roll over to a fresh shard before this file would overflow the cap.
            if current_tar is None or current_shard_size + file_size > shard_size_limit:
                if current_tar is not None:
                    current_tar.close()
                shard_name = f"shard_{shard_index}.tar"
                shard_path = os.path.join(output_dir, shard_name)
                current_tar = tarfile.open(shard_path, "w")
                shard_index += 1
                current_shard_size = 0

            arcname = arcnames[i] if arcnames else os.path.basename(file_path)
            current_tar.add(file_path, arcname=arcname)
            current_shard_size += file_size
    finally:
        # Close the open shard even when getsize()/add() raises mid-loop, so no
        # file handle (and partially written archive) is left dangling.
        if current_tar is not None:
            current_tar.close()
|