bjelkenhed committed
Commit: 1ede384
Parent(s): 5f51769

Metadata support

Files changed:
- archive/nst/metadata_se_csv.zip  +0 -3
- babelbox_voice.py  +38 -33
archive/nst/metadata_se_csv.zip DELETED
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:94993f25e3cc9415a465dbdb1529a300aca3dc24e0a29cdfb112d153be06f1bc
-size 19735371
babelbox_voice.py CHANGED
@@ -1,20 +1,14 @@
 """ Babelbox Voice Dataset"""
 
+import os
 import csv
-import
-import urllib
-
+import codecs
 import datasets
-import requests
-import glob
-import gzip
 from typing import List
-from datasets.utils.py_utils import size_str
-logger = datasets.logging.get_logger(__name__)
-import torchaudio
-import torch
-from tqdm import tqdm
 from pathlib import Path
+from tqdm import tqdm
+
+logger = datasets.logging.get_logger(__name__)
 
 _CITATION = """\
 @inproceedings{babelboxvoice:2022,
@@ -92,35 +86,46 @@ class BabelboxVoice(datasets.GeneratorBasedBuilder):
     def _split_generators(self, dl_manager: datasets.DownloadManager) -> List[datasets.SplitGenerator]:
 
         archive_paths = dl_manager.download(_DL_URLS)
-
         local_extracted_archive_paths = dl_manager.extract(archive_paths) if not dl_manager.is_streaming else {}
+
+        metadata_path = dl_manager.download(_METADATA_URL)
+        local_extracted_metadata_path = dl_manager.extract(metadata_path) if not dl_manager.is_streaming else None
 
-
-
-        #meta_path = dl_manager.download_and_extract(_METADATA_URL)
-
-        meta_archive = dl_manager.iter_archive(_METADATA_URL)
-
-        print(meta_archive)
-
+        metadata_archive = dl_manager.iter_archive(metadata_path)
         metadata = {}
-        for path, file in meta_archive:
-
-            reader = csv.DictReader(file)
+        for path, file in metadata_archive:
+            reader = csv.DictReader(codecs.iterdecode(file, 'utf-8'))
             for row in tqdm(reader, desc="Reading metadata..."):
-
-
-
+                filename = row['filename_channel_1']
+                sentence = row['text']
+                metadata[filename] = sentence
+
 
         """
         metadata = {}
-
-
-        for
-
-
-
-
+        if dl_manager.is_streaming:
+            meta_archive = dl_manager.iter_archive(_METADATA_URL)
+            for path, file in meta_archive:
+                reader = csv.DictReader(codecs.iterdecode(file, 'utf-8'))
+                for row in tqdm(reader, desc="Reading metadata..."):
+                    filename = row['filename_channel_1']
+                    sentence = row['text']
+                    metadata[filename] = sentence
+        else:
+            metadata_path = dl_manager.download(_METADATA_URL)
+            local_extracted_metadata_path = dl_manager.extract(metadata_path)
+            metadata_path = Path(local_extracted_metadata_path)
+            for filename in metadata_path.glob("*.csv"):
+                with open(filename) as csv_file:
+                    reader = csv.DictReader(csv_file)
+                    for row in tqdm(reader, desc="Reading metadata..."):
+                        filename = row['filename_channel_1']
+                        sentence = row['text']
+                        metadata[filename] = sentence
+
+        """
+
+
 
         return [
            datasets.SplitGenerator(name=datasets.Split.TRAIN,
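A note on the decoding step in the new code: dl_manager.iter_archive yields (path, file) pairs where file is opened in binary mode, so the CSV bytes are wrapped in codecs.iterdecode before csv.DictReader parses them. Below is a minimal, self-contained sketch of that pattern; the row values are invented for illustration, only the column names filename_channel_1 and text come from the loader.

import codecs
import csv
import io

# Stand-in for one CSV member yielded by dl_manager.iter_archive (a binary file object).
raw = io.BytesIO(b"filename_channel_1,text\r\nexample_0001.wav,an example sentence\r\n")

# Decode the byte stream lazily, then parse it row by row.
reader = csv.DictReader(codecs.iterdecode(raw, "utf-8"))
metadata = {row["filename_channel_1"]: row["text"] for row in reader}
print(metadata)  # {'example_0001.wav': 'an example sentence'}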
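This commit only builds the metadata dict in _split_generators; _generate_examples is not part of the diff, so how that dict is consumed is not shown. As a rough, hypothetical sketch of the pairing step (the function name, the feature keys, and the bare-file-name lookup are assumptions, not taken from the repository):

import os

# Hypothetical sketch only: pair each audio member of the data archives with its
# transcription by looking its base name up in the metadata dict built above.
def generate_examples_sketch(audio_archive, metadata):
    # audio_archive: (path, file-like) pairs, e.g. from dl_manager.iter_archive(...)
    # metadata: the {filename_channel_1: text} dict built in _split_generators
    key = 0
    for path, file in audio_archive:
        filename = os.path.basename(path)   # assumes the CSV stores bare file names
        sentence = metadata.get(filename)   # skip clips without a transcription
        if sentence is None:
            continue
        yield key, {
            "path": path,
            "audio": {"path": path, "bytes": file.read()},
            "sentence": sentence,
        }
        key += 1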