Edmon02 committed
Commit bfef8de
Parent: 0254805

Update app.py

Files changed (1): app.py (+103, -37)
app.py CHANGED
@@ -7,22 +7,42 @@ import datasets
 
 
 _DESCRIPTION = """
-A large-scale speech corpus for representation learning, semi-supervised learning and interpretation.
+A large-scale multilingual speech corpus for representation learning, semi-supervised learning and interpretation.
 """
 
 _CITATION = """
-@inproceedings{}
+@inproceedings{wang-etal-2021-voxpopuli,
+    title = "{V}ox{P}opuli: A Large-Scale Multilingual Speech Corpus for Representation Learning,
+    Semi-Supervised Learning and Interpretation",
+    author = "Wang, Changhan and
+      Riviere, Morgane and
+      Lee, Ann and
+      Wu, Anne and
+      Talnikar, Chaitanya and
+      Haziza, Daniel and
+      Williamson, Mary and
+      Pino, Juan and
+      Dupoux, Emmanuel",
+    booktitle = "Proceedings of the 59th Annual Meeting of the Association for Computational Linguistics
+    and the 11th International Joint Conference on Natural Language Processing (Volume 1: Long Papers)",
+    month = aug,
+    year = "2021",
+    publisher = "Association for Computational Linguistics",
+    url = "https://aclanthology.org/2021.acl-long.80",
+    doi = "10.18653/v1/2021.acl-long.80",
+    pages = "993--1003",
+}
 """
 
-_HOMEPAGE = ""
+_HOMEPAGE = "https://github.com/facebookresearch/voxpopuli"
 
-_LICENSE = ""
+_LICENSE = "CC0, also see https://www.europarl.europa.eu/legal-notice/en/"
 
 _ASR_LANGUAGES = [
     "hy"
 ]
 _ASR_ACCENTED_LANGUAGES = [
-    ""
+    "en_accented"
 ]
 
 _LANGUAGES = _ASR_LANGUAGES + _ASR_ACCENTED_LANGUAGES
@@ -67,38 +87,84 @@ class HySpeech(datasets.GeneratorBasedBuilder):
         )
 
     def _split_generators(self, dl_manager):
-        # Define paths to your train, dev, and test data
-        data_dir = "data/"
-        train_data_dir = os.path.join(data_dir, "train")
-        dev_data_dir = os.path.join(data_dir, "dev")
-        test_data_dir = os.path.join(data_dir, "test")
-
-        # Load metadata files for train, dev, and test
-        train_metadata_path = os.path.join(data_dir, "train.tsv")
-        dev_metadata_path = os.path.join(data_dir, "dev.tsv")
-        test_metadata_path = os.path.join(data_dir, "test.tsv")
-
-        # Yield split generators for train, dev, and test
+        # Shard counts per language and split, published alongside the data
+        n_shards_path = dl_manager.download_and_extract(_N_SHARDS_FILE)
+        with open(n_shards_path) as f:
+            n_shards = json.load(f)
+
+        splits = ["train", "dev", "test"]
+
+        # Key both URL maps as split -> lang so the per-language lookups
+        # below and in _generate_examples resolve
+        audio_urls = defaultdict(dict)
+        for split in splits:
+            for lang in self.config.languages:
+                audio_urls[split][lang] = [_AUDIO_ARCHIVE_PATH.format(split=split)]
+
+        meta_urls = defaultdict(dict)
+        for split in splits:
+            for lang in self.config.languages:
+                meta_urls[split][lang] = _METADATA_PATH.format(split=split)
+
+        meta_paths = dl_manager.download_and_extract(meta_urls)
+        audio_paths = dl_manager.download(audio_urls)
+
+        # In streaming mode the archives are iterated in place, so there are
+        # no local extraction paths; keep None placeholders of matching shape
+        local_extracted_audio_paths = (
+            dl_manager.extract(audio_paths) if not dl_manager.is_streaming else
+            {
+                split: {lang: [None] * len(audio_paths[split][lang]) for lang in self.config.languages}
+                for split in splits
+            }
+        )
+
         return [
-            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"data_dir": train_data_dir, "metadata_path": train_metadata_path}),
-            datasets.SplitGenerator(name=datasets.Split.VALIDATION, gen_kwargs={"data_dir": dev_data_dir, "metadata_path": dev_metadata_path}),
-            datasets.SplitGenerator(name=datasets.Split.TEST, gen_kwargs={"data_dir": test_data_dir, "metadata_path": test_metadata_path}),
+            datasets.SplitGenerator(
+                name=datasets.Split.TRAIN,
+                gen_kwargs={
+                    "audio_archives": {
+                        lang: [dl_manager.iter_archive(archive) for archive in lang_archives]
+                        for lang, lang_archives in audio_paths["train"].items()
+                    },
+                    "local_extracted_archives_paths": local_extracted_audio_paths["train"],
+                    "metadata_paths": meta_paths["train"],
+                }
+            ),
+            datasets.SplitGenerator(
+                name=datasets.Split.VALIDATION,
+                gen_kwargs={
+                    "audio_archives": {
+                        lang: [dl_manager.iter_archive(archive) for archive in lang_archives]
+                        for lang, lang_archives in audio_paths["dev"].items()
+                    },
+                    "local_extracted_archives_paths": local_extracted_audio_paths["dev"],
+                    "metadata_paths": meta_paths["dev"],
+                }
+            ),
+            datasets.SplitGenerator(
+                name=datasets.Split.TEST,
+                gen_kwargs={
+                    "audio_archives": {
+                        lang: [dl_manager.iter_archive(archive) for archive in lang_archives]
+                        for lang, lang_archives in audio_paths["test"].items()
+                    },
+                    "local_extracted_archives_paths": local_extracted_audio_paths["test"],
+                    "metadata_paths": meta_paths["test"],
+                }
+            ),
         ]
 
-    def _generate_examples(self, data_dir, metadata_path):
-        # Load metadata from TSV file
-        with open(metadata_path, "r") as f:
-            metadata = csv.DictReader(f, delimiter="\t")
-
-        # Iterate over metadata to yield examples
-        for row in metadata:
-            audio_id = row["audio_id"]
-            audio_path = os.path.join(data_dir, row["audio_path"])  # Adjust column name accordingly
-            # Load audio file and yield example
-            with open(audio_path, "rb") as audio_file:
-                yield audio_id, {
-                    "audio_id": audio_id,
-                    "language": row["language"],  # Adjust column name accordingly
-                    "audio": {"path": audio_path, "bytes": audio_file.read()},
-                    # Add other metadata fields as needed
-                }
+    def _generate_examples(self, audio_archives, local_extracted_archives_paths, metadata_paths):
+        features = ["raw_text", "normalized_text", "speaker_id", "gender", "is_gold_transcript", "accent"]
+
+        # One metadata file and one set of audio archives per language
+        for lang in self.config.languages:
+            # Index the split's TSV metadata by utterance id
+            meta_path = metadata_paths[lang]
+            with open(meta_path) as f:
+                metadata = {x["id"]: x for x in csv.DictReader(f, delimiter="\t")}
+
+            for audio_archive, local_extracted_archive_path in zip(audio_archives[lang], local_extracted_archives_paths[lang]):
+                for audio_filename, audio_file in audio_archive:
+                    audio_id = audio_filename.split(os.sep)[-1].split(".wav")[0]
+                    path = os.path.join(local_extracted_archive_path, audio_filename) if local_extracted_archive_path else audio_filename
+
+                    yield audio_id, {
+                        "audio_id": audio_id,
+                        **{feature: metadata[audio_id][feature] for feature in features},
+                        "audio": {"path": path, "bytes": audio_file.read()},
+                    }
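
For reference, the new _split_generators leans on three module-level constants (_N_SHARDS_FILE, _AUDIO_ARCHIVE_PATH, _METADATA_PATH) that sit above this hunk and are not shown in the diff. A minimal sketch of plausible definitions, assuming the audio and metadata live under data/ in the dataset repo; the exact paths and file names are assumptions, not confirmed by the commit:

    # Hypothetical layout; the real constants are defined earlier in app.py
    # and may differ.
    _N_SHARDS_FILE = "data/n_shards.json"       # assumed: JSON of {lang: {split: shard_count}}
    _AUDIO_ARCHIVE_PATH = "data/{split}.tar"    # assumed: one audio archive per split
    _METADATA_PATH = "data/{split}.tsv"         # assumed: one transcript TSV per split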
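
The rewritten _generate_examples keys each split's metadata on an "id" column and copies six more columns verbatim, so the TSV header is pinned down by the hunk itself. A self-contained check with an invented sample row (the column names come from the features list above; the values are made up):

    import csv
    import io

    # Minimal TSV the new _generate_examples can consume: "id" plus the six
    # copied features, tab-separated. Sample values are for illustration only.
    sample_tsv = (
        "id\traw_text\tnormalized_text\tspeaker_id\tgender\tis_gold_transcript\taccent\n"
        "20200101-0900_0001\tBarev dzez\tbarev dzez\tspk_001\tfemale\tTrue\tNone\n"
    )
    metadata = {x["id"]: x for x in csv.DictReader(io.StringIO(sample_tsv), delimiter="\t")}
    assert metadata["20200101-0900_0001"]["gender"] == "female"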
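
A quick way to smoke-test the rewritten generators is to stream a single example. A sketch, assuming the dataset lives at Edmon02/hy_speech on the Hub (the repo id is a guess from the commit author, and recent datasets releases require trust_remote_code=True for script-based datasets):

    from datasets import load_dataset

    # Streaming avoids downloading the full audio archives.
    ds = load_dataset("Edmon02/hy_speech", split="train", streaming=True, trust_remote_code=True)
    sample = next(iter(ds))
    print(sample["audio_id"], sample["normalized_text"])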