gcjavi committed
Commit 781121f
1 Parent(s): 91bc1a9

test new folder structure

.gitattributes CHANGED
@@ -54,3 +54,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
  *.jpeg filter=lfs diff=lfs merge=lfs -text
  *.webp filter=lfs diff=lfs merge=lfs -text
  2015-other.tar filter=lfs diff=lfs merge=lfs -text
+ *.tsv filter=lfs diff=lfs merge=lfs -text
data/2015-other.tar → audio/clean/test/clean_test.tar RENAMED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:a3cadbf50b6f4f588a2a07c895ba5d0ce0d778cf01263b0cf5aa3ee4fdd47bf7
- size 8640757760
+ oid sha256:8ccbb447d99bb45a93a4b3aef70379bffaf88d9dd36248353c656883449556c9
+ size 2715852800
audio/clean/train/clean_train.tar ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:1c5d6ff4daaee62df0f071b671f7ce02c8c7cc8a5039ced70144ffc477e687bc
+ size 7456389120
data/clean/test/clean_test.tsv ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:775e5335d84cfadda12e4e91c0c23d65d375f1a77b706ef93dea52e20d7d44cb
+ size 1856141
data/clean/train/clean_train.tsv ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:aeaac895cf84c48497176d49aac9b7f8d44df1389222274235e21bd1ce89df82
+ size 7898749
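The two TSV files above are the per-split index files that the new loader script further below (nos-parlaspeech.py) reads with pandas, keyed by the path of each wav inside the corresponding tar. A minimal sketch of inspecting one of them locally; the column set (path, speaker_id, sentence, gender, duration) is assumed from the features declared in that script, not verified here:

import pandas as pd

# Hypothetical local copy of the train index added in this commit.
# Columns other than "path" are assumed from the features declared in
# nos-parlaspeech.py (speaker_id, sentence, gender, duration).
index = pd.read_csv(
    "data/clean/train/clean_train.tsv",
    delimiter="\t",
    index_col="path",  # the loader keys metadata by wav path inside the tar
).to_dict(orient="index")

print(len(index))                  # number of indexed clips
print(next(iter(index.items())))   # (wav path, metadata dict) for one clip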
data/transcript.tsv DELETED
@@ -1,2 +0,0 @@
- path sentence
- DSPG_137_23122015_9873.69_9888.03.wav THIS IS A SAMPLE SENTENCE
my_dataset.py DELETED
@@ -1,132 +0,0 @@
- import LANGUAGES as LANGUAGES
- import STATS as STATS
- import datasets as datasets
- from datasets.utils.py_utils import size_str
-
- _HOMEPAGE = "homepage-info"
- _CITATION = "citation-info"
- _LICENSE = "license-info"
- _DESCRIPTION = "description-info"
-
- _PROMPTS_URLS = "....."
- _DATA_URL = "...."
-
-
-
- """Configuration class; allows multiple configurations if needed."""
- class ParlaSpeechDatasetConfig(datasets.BuilderConfig):
-     """BuilderConfig for ParlaSpeech"""
-
-     def __init__(self, name, version, **kwargs):
-         self.language = kwargs.pop("language", None)
-         self.release_date = kwargs.pop("release_date", None)
-         self.num_clips = kwargs.pop("num_clips", None)
-         self.num_speakers = kwargs.pop("num_speakers", None)
-         self.validated_hr = kwargs.pop("validated_hr", None)
-         self.total_hr = kwargs.pop("total_hr", None)
-         self.size_bytes = kwargs.pop("size_bytes", None)
-         self.size_human = size_str(self.size_bytes)
-         description = (  ## Update description in the final version
-             f"ParlaSpeech is a dataset in {self.language} released on {self.release_date}. "
-         )
-         super(ParlaSpeechDatasetConfig, self).__init__(
-             name=name,
-             version=datasets.Version(version),
-             description=description,
-             **kwargs,
-         )
-
-
- class ParlaSpeechDataset(datasets.GeneratorBasedBuilder):
-
-     """
-     ### NOT SURE WHETHER THIS IS NEEDED ###
-     DEFAULT_CONFIG_NAME = "all"
-
-     BUILDER_CONFIGS = [
-         ParlaSpeechDatasetConfig(
-             name=lang,
-             version=STATS["version"],
-             language=LANGUAGES[lang],
-             release_date=STATS["date"],
-             num_clips=lang_stats["clips"],
-             num_speakers=lang_stats["users"],
-             total_hr=float(lang_stats["totalHrs"]) if lang_stats["totalHrs"] else None,
-             size_bytes=int(lang_stats["size"]) if lang_stats["size"] else None,
-         )
-         for lang, lang_stats in STATS["locales"].items()
-     ]
-     """
-
-     """When the dataset is loaded and .info is called, the info defined here is displayed."""
-     def _info(self):
-         return datasets.DatasetInfo(
-             description=_DESCRIPTION,
-             features=datasets.Features(
-                 {
-                     # "speaker_id": datasets.Value("string"),
-                     # "path": datasets.Value("string"),
-                     "path": datasets.Audio(sampling_rate=16_000),
-                     "sentence": datasets.Value("string"),
-                 }
-             ),
-             supervised_keys=None,
-             homepage=_HOMEPAGE,
-             license=_LICENSE,
-             citation=_CITATION,
-             version=self.config.version,
-         )
-
-     """Organizes the audio files and sentence prompts in each split once the dataset is downloaded."""
-     def _split_generators(self, dl_manager):
-         """Returns SplitGenerators."""
-         prompts_paths = dl_manager.download(_PROMPTS_URLS)
-         archive = dl_manager.download(_DATA_URL)
-         ## local_extracted_archives = dl_manager.extract(archive)
-         train_dir = "vivos/train"
-         test_dir = "vivos/test"
-
-         return [
-             datasets.SplitGenerator(
-                 name=datasets.Split.TRAIN,
-                 gen_kwargs={
-                     "prompts_path": prompts_paths["train"],
-                     "path_to_clips": train_dir + "/waves",
-                     "audio_files": dl_manager.iter_archive(archive),
-                 },
-             ),
-             datasets.SplitGenerator(
-                 name=datasets.Split.TEST,
-                 gen_kwargs={
-                     "prompts_path": prompts_paths["test"],
-                     "path_to_clips": test_dir + "/waves",
-                     "audio_files": dl_manager.iter_archive(archive),
-                 },
-             ),
-         ]
-
-     def _generate_examples(self, prompts_path, path_to_clips, audio_files):
-         """Yields examples as (key, example) tuples."""
-         examples = {}
-         with open(prompts_path, encoding="utf-8") as f:  ## prompts_path -> transcript.tsv
-             for row in f:
-                 data = row.strip().split(" ", 1)
-                 # speaker_id = data[0].split("_")[0]
-                 # audio_path = "/".join([path_to_clips, speaker_id, data[0] + ".wav"])
-                 audio_path = "/".join([path_to_clips, "DSPG_137_23122015_9873.69_9888.03.wav"])
-                 examples[audio_path] = {
-                     # "speaker_id": speaker_id,
-                     "path": audio_path,
-                     "sentence": data[1],
-                 }
-         inside_clips_dir = False
-         id_ = 0
-         for path, f in audio_files:
-             if path.startswith(path_to_clips):
-                 inside_clips_dir = True
-                 if path in examples:
-                     audio = {"path": path, "bytes": f.read()}
-                     yield id_, {**examples[path], "audio": audio}
-                     id_ += 1
-             elif inside_clips_dir:
-                 break
nos-parlaspeech.py ADDED
@@ -0,0 +1,94 @@
+ # coding=utf-8
+
+ import pandas as pd
+
+ import datasets
+ from datasets.tasks import AutomaticSpeechRecognition
+
+
+ _CITATION = """\
+
+ """
+
+ _DESCRIPTION = """\
+
+ """
+
+ _HOMEPAGE = "https://zenodo.org/record/5541827"
+
+ _LICENSE = "Creative Commons Attribution 4.0 International"
+
+ _INDEX_REPO = "https://huggingface.co/datasets/proxectonos/Nos_Parlaspeech-GL/tree/main/"
+ # _INDEX_REPO = "https://huggingface.co/datasets/proxectonos/Nos_Parlaspeech-GL/resolve/main/"
+
+ _URLS = {
+     "index": _INDEX_REPO + "data/{config}/{split}/{config}_{split}.tsv",
+     "audio": "audio/{config}/{split}/{config}_{split}.tar",
+     # "audio": "audio/{config}/{split}/{config}_{split}.tar?download=1",
+ }
+ _SPLITS = {datasets.Split.TRAIN: "train", datasets.Split.VALIDATION: "dev", datasets.Split.TEST: "test"}
+
+
+ class ParlaSpeech(datasets.GeneratorBasedBuilder):
+     """Nos-ParlaSpeech."""
+
+     VERSION = datasets.Version("1.0")
+
+     BUILDER_CONFIGS = [
+         datasets.BuilderConfig(name="clean", version=VERSION, description="XX hours of clean quality segments."),
+         datasets.BuilderConfig(name="other", version=VERSION, description="XX hours of other quality segments."),
+     ]
+
+     def _info(self):
+         return datasets.DatasetInfo(
+             description=_DESCRIPTION,
+             features=datasets.Features(
+                 {
+                     "path": datasets.Value("string"),  # path to wav file
+                     "audio": datasets.features.Audio(),
+                     "speaker_id": datasets.Value("int64"),
+                     "sentence": datasets.Value("string"),
+                     "gender": datasets.ClassLabel(names=["F", "M"]),
+                     "duration": datasets.Value("float64"),
+                 }
+             ),
+             supervised_keys=None,
+             homepage=_HOMEPAGE,
+             license=_LICENSE,
+             citation=_CITATION,
+             task_templates=[
+                 AutomaticSpeechRecognition(transcription_column="sentence")
+             ],
+         )
+
+     def _split_generators(self, dl_manager):
+         urls = {
+             split: {key: url.format(config=self.config.name, split=_SPLITS[split]) for key, url in _URLS.items()}
+             for split in _SPLITS
+         }
+         dl_dir = dl_manager.download(urls)
+         return [
+             datasets.SplitGenerator(
+                 name=split,
+                 gen_kwargs={
+                     "index_path": dl_dir[split]["index"],
+                     "audio_files": dl_manager.iter_archive(dl_dir[split]["audio"]),
+                 },
+             )
+             for split in _SPLITS
+         ]
+
+     def _generate_examples(self, index_path, audio_files):
+         with open(index_path, encoding="utf-8") as index_file:
+             index = pd.read_csv(index_file, delimiter="\t", index_col="path").to_dict(orient="index")
+         # clean: 83568 = 79269 + 2155 + 2144 ; other: 146669 = 142813 + 1957 + 1899
+         for key, (path, file) in enumerate(audio_files):
+             if path.endswith(".wav"):
+                 data = index.pop(path)
+                 audio = {"path": path, "bytes": file.read()}
+                 yield key, {"path": path, "audio": audio, **data}
+             else:
+                 path = path + ".wav"
+                 data = index.pop(path)
+                 audio = {"path": path, "bytes": file.read()}
+                 yield key, {"path": path, "audio": audio, **data}
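Once this script is in place, the dataset would presumably be loaded through the standard datasets API. A minimal usage sketch, assuming the repo id taken from _INDEX_REPO and the "clean" config defined in BUILDER_CONFIGS; this is not a verified call against the Hub. Note also that _INDEX_REPO currently points at the /tree/main/ HTML view, whereas the commented-out /resolve/main/ variant is the URL form that serves raw files.

from datasets import load_dataset

# Repo id and config name are taken from nos-parlaspeech.py above; treat this
# as a sketch of the intended usage, not a tested invocation.
ds = load_dataset("proxectonos/Nos_Parlaspeech-GL", "clean", split="train")

sample = ds[0]
print(sample["path"], sample["duration"])
print(sample["sentence"])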