carlosdanielhernandezmena committed on
Commit e11886c
Parent(s): 73e199f

Upload toy_corpus_asr_es.py

Files changed (1)
toy_corpus_asr_es.py  +149 -0
toy_corpus_asr_es.py ADDED
import os
import csv

import datasets

_NAME = "toy_corpus_asr_es"
_VERSION = "1.0.0"
_AUDIO_EXTENSIONS = ".flac"  # all clips in the corpus are FLAC files

_DESCRIPTION = """
An extremely small corpus of 40 audio files taken from Common Voice (es), created to test how to share datasets on the Hugging Face Hub.
"""

_CITATION = """
@misc{toy_corpus_asr_es,
    title={Toy Corpus for ASR in Spanish.},
    author={Hernandez Mena, Carlos Daniel},
    year={2022},
    url={https://huggingface.co/datasets/carlosdanielhernandezmena/toy_corpus_asr_es},
}
"""

_HOMEPAGE = "https://huggingface.co/datasets/carlosdanielhernandezmena/toy_corpus_asr_es"

_LICENSE = "CC-BY-4.0, See https://creativecommons.org/licenses/by/4.0/"

_BASE_DATA_DIR = "corpus/"
_METADATA_TRAIN = os.path.join(_BASE_DATA_DIR, "files", "metadata_train.tsv")
_METADATA_TEST = os.path.join(_BASE_DATA_DIR, "files", "metadata_test.tsv")
_METADATA_DEV = os.path.join(_BASE_DATA_DIR, "files", "metadata_dev.tsv")

_TARS_TRAIN = os.path.join(_BASE_DATA_DIR, "files", "tars_train.paths")
_TARS_TEST = os.path.join(_BASE_DATA_DIR, "files", "tars_test.paths")
_TARS_DEV = os.path.join(_BASE_DATA_DIR, "files", "tars_dev.paths")

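# A note on the expected metadata layout (illustrative, not read from the
# corpus itself): each metadata_*.tsv is tab-separated with a header row
# containing at least the columns consumed by _generate_examples below,
# in any order:
#
#   audio_id    split    gender    normalized_text    relative_path
#
# and each tars_*.paths file lists, one path per line, the tar shards that
# make up the corresponding split.
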
class ToyCorpusAsrEsConfig(datasets.BuilderConfig):
    """BuilderConfig for Toy Corpus ASR ES."""

    def __init__(self, name, **kwargs):
        # The corpus ships a single configuration, so any requested name is
        # normalized to _NAME before it reaches the parent constructor.
        super().__init__(name=_NAME, **kwargs)

class ToyCorpusAsrEs(datasets.GeneratorBasedBuilder):
    """The Toy Corpus ASR ES dataset."""

    VERSION = datasets.Version(_VERSION)
    BUILDER_CONFIGS = [
        ToyCorpusAsrEsConfig(
            name=_NAME,
            version=datasets.Version(_VERSION),
        )
    ]

    def _info(self):
        features = datasets.Features(
            {
                "audio_id": datasets.Value("string"),
                "audio": datasets.Audio(sampling_rate=16000),
                "split": datasets.Value("string"),
                "gender": datasets.Value("string"),
                "normalized_text": datasets.Value("string"),
                "relative_path": datasets.Value("string"),
            }
        )
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

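    # _split_generators resolves three artifacts per split: the TSV metadata
    # file, the *.paths manifest naming that split's tar shards, and the
    # shards themselves. When streaming, extraction is skipped and the
    # placeholder paths below stay None.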
    def _split_generators(self, dl_manager):
        metadata_train = dl_manager.download_and_extract(_METADATA_TRAIN)
        metadata_test = dl_manager.download_and_extract(_METADATA_TEST)
        metadata_dev = dl_manager.download_and_extract(_METADATA_DEV)

        tars_train = dl_manager.download_and_extract(_TARS_TRAIN)
        tars_test = dl_manager.download_and_extract(_TARS_TEST)
        tars_dev = dl_manager.download_and_extract(_TARS_DEV)

        # Collect the tar shard paths listed in each split's manifest.
        hash_tar_files = {}
        with open(tars_train, "r") as f:
            hash_tar_files["train"] = [path.rstrip("\n") for path in f]

        with open(tars_test, "r") as f:
            hash_tar_files["test"] = [path.rstrip("\n") for path in f]

        with open(tars_dev, "r") as f:
            hash_tar_files["dev"] = [path.rstrip("\n") for path in f]

        hash_meta_paths = {"train": metadata_train, "test": metadata_test, "dev": metadata_dev}
        audio_paths = dl_manager.download(hash_tar_files)

        splits = ["train", "dev", "test"]
        # In streaming mode nothing is extracted to disk; keep None
        # placeholders so _generate_examples falls back to the in-archive
        # filenames.
        local_extracted_audio_paths = (
            dl_manager.extract(audio_paths)
            if not dl_manager.is_streaming
            else {split: [None] * len(audio_paths[split]) for split in splits}
        )

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "audio_archives": [dl_manager.iter_archive(archive) for archive in audio_paths["train"]],
                    "local_extracted_archives_paths": local_extracted_audio_paths["train"],
                    "metadata_paths": hash_meta_paths["train"],
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={
                    "audio_archives": [dl_manager.iter_archive(archive) for archive in audio_paths["dev"]],
                    "local_extracted_archives_paths": local_extracted_audio_paths["dev"],
                    "metadata_paths": hash_meta_paths["dev"],
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={
                    "audio_archives": [dl_manager.iter_archive(archive) for archive in audio_paths["test"]],
                    "local_extracted_archives_paths": local_extracted_audio_paths["test"],
                    "metadata_paths": hash_meta_paths["test"],
                },
            ),
        ]

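    # _generate_examples joins the two sources: the TSV rows, indexed by
    # audio_id, and the audio bytes read from each tar member. The example
    # key is the file name without its extension.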
    def _generate_examples(self, audio_archives, local_extracted_archives_paths, metadata_paths):
        features = ["normalized_text", "gender", "split", "relative_path"]

        with open(metadata_paths) as f:
            metadata = {row["audio_id"]: row for row in csv.DictReader(f, delimiter="\t")}

        for audio_archive, local_extracted_archive_path in zip(audio_archives, local_extracted_archives_paths):
            for audio_filename, audio_file in audio_archive:
                audio_id = os.path.splitext(os.path.basename(audio_filename))[0]
                path = os.path.join(local_extracted_archive_path, audio_filename) if local_extracted_archive_path else audio_filename
                yield audio_id, {
                    "audio_id": audio_id,
                    **{feature: metadata[audio_id][feature] for feature in features},
                    "audio": {"path": path, "bytes": audio_file.read()},
                }
+ }