gcjavi committed
Commit e9d61e0
1 Parent(s): af75c0a

add script

Files changed (1)
  1. my_dataset.py +132 -0
my_dataset.py ADDED
@@ -0,0 +1,132 @@
import LANGUAGES  # only used by the commented-out BUILDER_CONFIGS below
import STATS  # only used by the commented-out BUILDER_CONFIGS below
import datasets
from datasets.utils.py_utils import size_str

_HOMEPAGE = "homepage-info"
_CITATION = "citation-info"
_LICENSE = "license-info"
_DESCRIPTION = "description-info"

_PROMPTS_URLS = "....."  # TODO: _split_generators expects a dict with "train" and "test" transcript URLs
_DATA_URL = "...."  # TODO: URL of the audio archive


# Configuration class; allows having multiple configurations if needed.
class ParlaSpeechDatasetConfig(datasets.BuilderConfig):
    """BuilderConfig for ParlaSpeech."""

    def __init__(self, name, version, **kwargs):
        self.language = kwargs.pop("language", None)
        self.release_date = kwargs.pop("release_date", None)
        self.num_clips = kwargs.pop("num_clips", None)
        self.num_speakers = kwargs.pop("num_speakers", None)
        self.validated_hr = kwargs.pop("validated_hr", None)
        self.total_hr = kwargs.pop("total_hr", None)
        self.size_bytes = kwargs.pop("size_bytes", None)
        self.size_human = size_str(self.size_bytes)
        description = (  # TODO: update the description in the final version
            f"ParlaSpeech is a dataset in {self.language} released on {self.release_date}. "
        )
        super().__init__(
            name=name,
            version=datasets.Version(version),
            description=description,
            **kwargs,
        )


class ParlaSpeechDataset(datasets.GeneratorBasedBuilder):

    """
    ### NOT SURE WHETHER THIS IS NEEDED ###
    DEFAULT_CONFIG_NAME = "all"

    BUILDER_CONFIGS = [
        ParlaSpeechDatasetConfig(
            name=lang,
            version=STATS["version"],
            language=LANGUAGES[lang],
            release_date=STATS["date"],
            num_clips=lang_stats["clips"],
            num_speakers=lang_stats["users"],
            total_hr=float(lang_stats["totalHrs"]) if lang_stats["totalHrs"] else None,
            size_bytes=int(lang_stats["size"]) if lang_stats["size"] else None,
        )
        for lang, lang_stats in STATS["locales"].items()
    ]
    """

    # The information defined here is shown when the dataset is loaded and .info is called.
    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    # "speaker_id": datasets.Value("string"),
                    "path": datasets.Value("string"),
                    "audio": datasets.Audio(sampling_rate=16_000),
                    "sentence": datasets.Value("string"),
                }
            ),
            supervised_keys=None,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
            version=self.config.version,
        )

    # Organizes the audio files and sentence prompts into each split once the dataset has been downloaded.
    def _split_generators(self, dl_manager):
        """Returns SplitGenerators."""
        prompts_paths = dl_manager.download(_PROMPTS_URLS)
        archive = dl_manager.download(_DATA_URL)
        # local_extracted_archives = dl_manager.extract(archive)
        train_dir = "vivos/train"
        test_dir = "vivos/test"

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "prompts_path": prompts_paths["train"],
                    "path_to_clips": train_dir + "/waves",
                    "audio_files": dl_manager.iter_archive(archive),
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={
                    "prompts_path": prompts_paths["test"],
                    "path_to_clips": test_dir + "/waves",
                    "audio_files": dl_manager.iter_archive(archive),
                },
            ),
        ]

    def _generate_examples(self, prompts_path, path_to_clips, audio_files):
        """Yields examples as (key, example) tuples."""
        examples = {}
        with open(prompts_path, encoding="utf-8") as f:  # prompts_path -> transcript.tsv
            for row in f:
                data = row.strip().split(" ", 1)
                # speaker_id = data[0].split("_")[0]
                # audio_path = "/".join([path_to_clips, speaker_id, data[0] + ".wav"])
                audio_path = "/".join([path_to_clips, "DSPG_137_23122015_9873.69_9888.03.wav"])  # NOTE: hardcoded to a single clip for now
                examples[audio_path] = {
                    # "speaker_id": speaker_id,
                    "path": audio_path,
                    "sentence": data[1],
                }
        inside_clips_dir = False
        id_ = 0
        for path, f in audio_files:
            if path.startswith(path_to_clips):
                inside_clips_dir = True
                if path in examples:
                    audio = {"path": path, "bytes": f.read()}
                    yield id_, {**examples[path], "audio": audio}
                    id_ += 1
            elif inside_clips_dir:
                break
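
For reference, a minimal sketch of how this loading script could be exercised. It assumes the placeholder _PROMPTS_URLS and _DATA_URL have been replaced with real locations, that the script is saved locally as my_dataset.py, and that the installed datasets version still supports local loading scripts; none of this is part of the commit itself.

# Minimal usage sketch (assumptions noted above).
from datasets import load_dataset

ds = load_dataset("my_dataset.py", split="train")

sample = ds[0]
print(sample["path"])                    # clip path inside the downloaded archive
print(sample["sentence"])                # transcript parsed from the prompts file
print(sample["audio"]["sampling_rate"])  # 16000, as declared in _info()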