BenjaminGalliot committed
Commit eadda5e
1 Parent(s): 0945e8e

Update pangloss.py

Files changed (1)
  1. pangloss.py +18 -16
pangloss.py CHANGED
@@ -17,6 +17,7 @@ import csv
 import json
 import os
 import datasets
+from datasets.tasks import AutomaticSpeechRecognition
 
 _CITATION = {
     "yong1288": """
@@ -52,16 +53,22 @@ url = {https://doi.org/10.5281/zenodo.5521112}
 }
 
 _DESCRIPTION = """\
-These datasets are extracts from the Pangloss collection and have been preprocessed for ASR experiments in Yongning Na and Japhug.
+These datasets are extracts from the Pangloss collection and have
+been preprocessed for ASR experiments in Na and Japhug.
 """
 
 _HOMEPAGE = "https://pangloss.cnrs.fr/"
 
 _LICENSE = "https://creativecommons.org/licenses/by-nc-sa/4.0/fr/legalcode"
 
+# The HuggingFace Datasets library doesn't host the datasets but only points to the original files.
+# This can be an arbitrary nested dict/list of URLs (see below in `_split_generators` method)
+
+_VERSION = datasets.Version("1.0.0")
+
 _LANGUAGES = {
     "yong1288": {
-        "url": "https://mycore.core-cloud.net/index.php/s/RxOb4ai7GU4x1mQ/download",
+        "url": "https://mycore.core-cloud.net/index.php/s/vaGMeRf4Iij8MWR/download",
         "homepage": "https://zenodo.org/record/5336698",
         "description": "Yongning Na dataset",
         "translations": ["fr", "en", "zh"]
@@ -86,14 +93,6 @@ class PanglossDataset(datasets.GeneratorBasedBuilder):
         "traduction:zh": "translation:zh"
     }
 
-    VERSION = datasets.Version("1.2.0")
-
-    # TODO: Name of the dataset usually matches the script name with CamelCase instead of snake_case
-    class NewDataset(datasets.GeneratorBasedBuilder):
-        """TODO: Short description of my dataset."""
-
-
-
     # This is an example of a dataset with multiple configurations.
     # If you don't want/need to define several sub-sets in your dataset,
     # just remove the BUILDER_CONFIG_CLASS and the BUILDER_CONFIGS attributes.
@@ -106,24 +105,25 @@ class NewDataset(datasets.GeneratorBasedBuilder):
     # data = datasets.load_dataset('my_dataset', 'first_domain')
     # data = datasets.load_dataset('my_dataset', 'second_domain')
     BUILDER_CONFIGS = [
-        datasets.BuilderConfig(name=language_name, version=VERSION, description=language_data["description"])
+        datasets.BuilderConfig(name=language_name, version=_VERSION, description=language_data["description"])
        for language_name, language_data in _LANGUAGES.items()
     ]
 
-    #DEFAULT_CONFIG_NAME = ""  # It's not mandatory to have a default configuration. Just use one if it make sense.
+    #DEFAULT_CONFIG_NAME = "na"  # It's not mandatory to have a default configuration. Just use one if it make sense.
 
     def _info(self):
         # TODO: This method specifies the datasets.DatasetInfo object which contains informations and typings for the dataset
         features = datasets.Features(
             {
                 "path": datasets.Value("string"),
-                "audio": datasets.Audio(sampling_rate=16_000),
+                "audio": datasets.features.Audio(sampling_rate=16_000),
                 "sentence": datasets.Value("string"),
                 "doctype": datasets.Value("string"),
                 "speaker": datasets.Value("string"),
                 **{f"translation:{language_code}": datasets.Value("string") for language_code in _LANGUAGES[self.config.name]["translations"]}
             }
         )
+
         return datasets.DatasetInfo(
             # This is the description that will appear on the datasets page.
             description=_DESCRIPTION,
@@ -138,6 +138,8 @@ class NewDataset(datasets.GeneratorBasedBuilder):
             license=_LICENSE,
             # Citation for the dataset
             citation=_CITATION,
+            task_templates=[AutomaticSpeechRecognition(audio_column="audio", transcription_column="forme")],
+
         )
 
     def _split_generators(self, dl_manager):
@@ -190,9 +192,9 @@ class NewDataset(datasets.GeneratorBasedBuilder):
             yield key, data
 
 
-# if __name__ == "__main__":
-#     for language in _LANGUAGES.keys():
-#         datasets.load_dataset("pangloss.py", language)
+if __name__ == "__main__":
+    # for language in _LANGUAGES.keys():
+    datasets.load_dataset("datasets/pangloss/pangloss.py", "japh1234")
 
 # datasets-cli test datasets/pangloss --save_infos --all_configs
 # datasets-cli dummy_data datasets/pangloss --auto_generate
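
For reference, a minimal loading sketch built only from what this diff shows: the config names come from _LANGUAGES and the __main__ block ("yong1288", "japh1234"), and the column names from _info(). The "train" split name is an assumption here, since the body of _split_generators is not part of this diff.

import datasets

# Sketch only: the script path mirrors the __main__ block above; the "train"
# split is an assumption (split names are defined in _split_generators, not shown here).
dataset = datasets.load_dataset("datasets/pangloss/pangloss.py", "yong1288")

sample = dataset["train"][0]
print(sample["path"])                    # path to the audio file
print(sample["sentence"])                # transcription text
print(sample["audio"]["sampling_rate"])  # decoded by the Audio feature (16 kHz)
print(sample["translation:fr"])          # yong1288 also exposes translation:en and translation:zh

The added task_templates entry registers the dataset for automatic speech recognition; whether the transcription column ends up exposed as "forme" or "sentence" depends on the column-renaming logic in parts of the script not shown in this diff.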