Multilinguality: multilingual
Size Categories: 1K<n<10K
Language Creators: found
Annotations Creators: found
Source Datasets: original
License:
Commit 29961b6 (1 parent: 6a2c20a), committed by system (HF staff)

Update files from the datasets library (from 1.16.0)

Release notes: https://github.com/huggingface/datasets/releases/tag/1.16.0
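This commit tracks the loader APIs shipped in that release; in particular, the rewritten _split_generators below relies on dl_manager.iter_archive to stream tar archives instead of extracting them. A quick environment check, as a minimal sketch:

import datasets

# The refactored script assumes a release that ships dl_manager.iter_archive.
print(datasets.__version__)  # e.g. "1.16.0"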

dataset_infos.json CHANGED
The diff for this file is too large to render. See raw diff
 
dummy/SLR32/0.0.0/dummy_data.zip CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:e3fb7c9a12f985ff69e6bea0c686ccad7b2fa10b030018dc68627fcbb6803861
-size 4125
+oid sha256:e999b1296558a6cfc46d7b58f7a40525c69a4d1385a018518ae5a71d4a575c58
+size 12652
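The dummy archive is stored with Git LFS, so the diff above only touches the pointer file: the sha256 oid and the byte size of the real blob. A sketch of verifying the new pointer's fields against a local copy of the blob (the local file name is hypothetical):

import hashlib

# Values taken from the new LFS pointer in this commit.
expected_oid = "e999b1296558a6cfc46d7b58f7a40525c69a4d1385a018518ae5a71d4a575c58"
expected_size = 12652

# "dummy_data.zip" stands in for a locally checked-out copy of the LFS blob.
with open("dummy_data.zip", "rb") as f:
    blob = f.read()

print(hashlib.sha256(blob).hexdigest() == expected_oid)
print(len(blob) == expected_size)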
openslr.py CHANGED
@@ -112,20 +112,6 @@ SLR71, SLR71, SLR72, SLR73, SLR74, SLR75:
     ISBN = {979-10-95546-34-4},
   }
 
-  SLR83
-  @inproceedings{demirsahin-etal-2020-open,
-    title = {{Open-source Multi-speaker Corpora of the English Accents in the British Isles}},
-    author = {Demirsahin, Isin and Kjartansson, Oddur and Gutkin, Alexander and Rivera, Clara},
-    booktitle = {Proceedings of The 12th Language Resources and Evaluation Conference (LREC)},
-    month = may,
-    year = {2020},
-    pages = {6532--6541},
-    address = {Marseille, France},
-    publisher = {European Language Resources Association (ELRA)},
-    url = {https://www.aclweb.org/anthology/2020.lrec-1.804},
-    ISBN = {979-10-95546-34-4},
-  }
-
   SLR80
   @inproceedings{oo-etal-2020-burmese,
     title = {{Burmese Speech Corpus, Finite-State Text Normalization and Pronunciation Grammars with an Application
@@ -176,10 +162,10 @@ _RESOURCES = {
         "Setswana and isiXhosa.",
         "Files": ["af_za.tar.gz", "st_za.tar.gz", "tn_za.tar.gz", "xh_za.tar.gz"],
         "IndexFiles": [
-            "af_za/za/afr/line_index.tsv",
-            "st_za/za/sso/line_index.tsv",
-            "tn_za/za/tsn/line_index.tsv",
-            "xh_za/za/xho/line_index.tsv",
+            "https://s3.amazonaws.com/datasets.huggingface.co/openslr/SLR32/af_za/line_index.tsv",
+            "https://s3.amazonaws.com/datasets.huggingface.co/openslr/SLR32/st_za/line_index.tsv",
+            "https://s3.amazonaws.com/datasets.huggingface.co/openslr/SLR32/tn_za/line_index.tsv",
+            "https://s3.amazonaws.com/datasets.huggingface.co/openslr/SLR32/xh_za/line_index.tsv",
         ],
         "DataDirs": ["af_za/za/afr/wavs", "st_za/za/sso/wavs", "tn_za/za/tsn/wavs", "xh_za/za/xho/wavs"],
     },
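Pointing IndexFiles at plain HTTPS URLs means the transcription indexes can be fetched on their own, without first downloading and unpacking the multi-gigabyte audio tarballs. A small sketch of what that buys, using one of the URLs above (each TSV line pairs an audio file name with its sentence):

import urllib.request

# Read the first few transcription entries directly over HTTP.
url = "https://s3.amazonaws.com/datasets.huggingface.co/openslr/SLR32/af_za/line_index.tsv"
with urllib.request.urlopen(url) as response:
    for _ in range(3):
        print(response.readline().decode("utf-8").rstrip())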
@@ -493,39 +479,6 @@ _RESOURCES = {
         "IndexFiles": ["line_index.tsv"],
         "DataDirs": [""],
     },
-    "SLR83": {
-        "Language": "English",
-        "LongName": "Crowdsourced high-quality UK and Ireland English Dialect speech data set",
-        "Category": "Speech",
-        "Summary": "Data set which contains male and female recordings of English from various dialects of the UK and Ireland",
-        "Files": [
-            "irish_english_male.zip",
-            "midlands_english_female.zip",
-            "midlands_english_male.zip",
-            "northern_english_female.zip",
-            "northern_english_male.zip",
-            "scottish_english_female.zip",
-            "scottish_english_male.zip",
-            "southern_english_female.zip",
-            "southern_english_male.zip",
-            "welsh_english_female.zip",
-            "welsh_english_male.zip",
-        ],
-        "IndexFiles": [
-            "line_index.csv",
-            "line_index.csv",
-            "line_index.csv",
-            "line_index.csv",
-            "line_index.csv",
-            "line_index.csv",
-            "line_index.csv",
-            "line_index.csv",
-            "line_index.csv",
-            "line_index.csv",
-            "line_index.csv",
-        ],
-        "DataDirs": ["", "", "", "", "", "", "", "", "", "", ""],
-    },
     "SLR86": {
         "Language": "Yoruba",
         "LongName": "Crowdsourced high-quality Yoruba speech data set",
@@ -565,6 +518,7 @@ class OpenSlrConfig(datasets.BuilderConfig):
 
 
 class OpenSlr(datasets.GeneratorBasedBuilder):
+    DEFAULT_WRITER_BATCH_SIZE = 32
 
     BUILDER_CONFIGS = [
         OpenSlrConfig(
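DEFAULT_WRITER_BATCH_SIZE caps how many examples the builder buffers before flushing to the Arrow file. Since every example now embeds raw audio bytes, a small batch keeps peak memory bounded. A back-of-envelope sketch (the 1 MB per-example figure and the 1000-example library default are assumptions, not values from this commit):

# Rough memory held by the writer buffer before each flush.
avg_audio_bytes = 1_000_000  # assumed average WAV payload per example
for batch_size in (1000, 32):  # assumed library default vs. this builder
    print(f"batch={batch_size}: ~{batch_size * avg_audio_bytes / 1e6:.0f} MB buffered")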
@@ -605,21 +559,28 @@ class OpenSlr(datasets.GeneratorBasedBuilder):
         """Returns SplitGenerators."""
         resource_number = self.config.name.replace("SLR", "")
         urls = [f"{_DATA_URL.format(resource_number)}/{file}" for file in self.config.files]
-        dl_paths = dl_manager.download_and_extract(urls)
-        abs_path_to_indexs = [os.path.join(path, f"{self.config.index_files[i]}") for i, path in enumerate(dl_paths)]
-        abs_path_to_datas = [os.path.join(path, f"{self.config.data_dirs[i]}") for i, path in enumerate(dl_paths)]
+        if urls[0].endswith(".zip"):
+            dl_paths = dl_manager.download_and_extract(urls)
+            path_to_indexs = [os.path.join(path, f"{self.config.index_files[i]}") for i, path in enumerate(dl_paths)]
+            path_to_datas = [os.path.join(path, f"{self.config.data_dirs[i]}") for i, path in enumerate(dl_paths)]
+            archives = None
+        else:
+            archives = dl_manager.download(urls)
+            path_to_indexs = dl_manager.download(self.config.index_files)
+            path_to_datas = self.config.data_dirs
 
         return [
             datasets.SplitGenerator(
                 name=datasets.Split.TRAIN,
                 gen_kwargs={
-                    "path_to_indexs": abs_path_to_indexs,
-                    "path_to_datas": abs_path_to_datas,
+                    "path_to_indexs": path_to_indexs,
+                    "path_to_datas": path_to_datas,
+                    "archive_files": [dl_manager.iter_archive(archive) for archive in archives] if archives else None,
                 },
             ),
         ]
 
-    def _generate_examples(self, path_to_indexs, path_to_datas):
+    def _generate_examples(self, path_to_indexs, path_to_datas, archive_files):
         """Yields examples."""
 
         counter = -1
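The new else branch downloads the tarballs without extracting them and hands _generate_examples one iterator per archive from dl_manager.iter_archive, each yielding (path, file_object) pairs in archive order. A simplified stand-in (not the library's implementation) showing the shape of that iterator:

import io
import tarfile

def iter_archive(tar_path):
    # Yield (member_path, file_object) pairs in archive order, without
    # extracting anything to disk -- the contract the generator relies on.
    with tarfile.open(tar_path) as tar:
        for member in tar:
            if member.isfile():
                yield member.name, tar.extractfile(member)

# Tiny demo archive so the sketch is self-contained.
with tarfile.open("demo.tar", "w") as tar:
    payload = b"RIFF....WAVE"  # placeholder bytes standing in for a WAV file
    info = tarfile.TarInfo("af_za/za/afr/wavs/utt_0001.wav")
    info.size = len(payload)
    tar.addfile(info, io.BytesIO(payload))

for path, f in iter_archive("demo.tar"):
    print(path, f.read(4))  # af_za/za/afr/wavs/utt_0001.wav b'RIFF'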
@@ -640,16 +601,26 @@ class OpenSlr(datasets.GeneratorBasedBuilder):
                     sentence = sentence_index[filename]
                     counter += 1
                     yield counter, {"path": path, "audio": path, "sentence": sentence}
-        elif self.config.name in ["SLR83"]:
-            for i, path_to_index in enumerate(path_to_indexs):
+        elif self.config.name in ["SLR32"]:  # use archives
+            for path_to_index, path_to_data, files in zip(path_to_indexs, path_to_datas, archive_files):
+                sentences = {}
                 with open(path_to_index, encoding="utf-8") as f:
-                    lines = f.readlines()
-                    for id_, line in enumerate(lines):
-                        field_values = re.split(r",\s?", line.strip())
-                        user_id, filename, sentence = field_values
-                        path = os.path.join(path_to_datas[i], f"{filename}.wav")
+                    for line in f:
+                        # The following regexes are needed to normalise the lines, since the
+                        # datasets are not always consistent and have bugs:
+                        line = re.sub(r"\t[^\t]*\t", "\t", line.strip())
+                        field_values = re.split(r"\t\t?", line)
+                        if len(field_values) != 2:
+                            continue
+                        filename, sentence = field_values
+                        # set absolute path for audio file
+                        path = f"{path_to_data}/{filename}.wav"
+                        sentences[path] = sentence
+                for path, f in files:
+                    if path.startswith(path_to_data):
                         counter += 1
-                        yield counter, {"path": path, "audio": path, "sentence": sentence}
+                        audio = {"path": path, "bytes": f.read()}
+                        yield counter, {"path": path, "audio": audio, "sentence": sentences[path]}
         else:
             for i, path_to_index in enumerate(path_to_indexs):
                 with open(path_to_index, encoding="utf-8") as f:
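Because the SLR32 branch reads audio bytes straight out of the tar members and never extracts to disk, the config can also be consumed lazily. A usage sketch, assuming this script is the registered "openslr" loader:

from datasets import load_dataset

# Stream examples directly from the archives; no upfront extraction needed.
ds = load_dataset("openslr", "SLR32", split="train", streaming=True)
sample = next(iter(ds))
print(sample["sentence"])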
 