cahya committed on
Commit 16ba12d
1 Parent(s): 618144f
Files changed (4)
  1. languages.py +2 -1
  2. release_stats.py +4 -3
  3. test01.py +14 -17
  4. usage.py +1 -1
languages.py CHANGED
@@ -5,5 +5,6 @@ LANGUAGES = {
  'id': 'Indonesian',
  'min': 'Minangkabau',
  'jav': 'Javanese',
- 'sun': 'Sundanese'
+ 'sun': 'Sundanese',
+ 'all': 'All'
  }
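
The new 'all' entry gives the combined configuration a human-readable label next to the per-language codes. As a rough illustration of how this mapping is consumed, the sketch below pairs each locale in release_stats.py with its label from languages.py; it is not the repository's exact code (the real config list is built in test01.py with Test01Config), and it assumes both modules are importable from the working directory.

```python
# Hedged sketch: a plain dict stands in for Test01Config; the field names mirror
# the keyword arguments visible in the test01.py diff below.
from languages import LANGUAGES
from release_stats import STATS

configs = []
for lang, lang_stats in STATS["locales"].items():
    configs.append(
        {
            "name": lang,
            "label": LANGUAGES.get(lang, lang),  # falls back to the code when no label exists (e.g. "bal")
            "num_clips": lang_stats["clips"],
            "num_speakers": lang_stats["users"],
            "total_hr": float(lang_stats["totalHrs"]) if lang_stats["totalHrs"] else None,
            "size_bytes": int(lang_stats["size"]) if lang_stats["size"] else None,
        }
    )
print([c["name"] for c in configs])  # ['bal', 'sun', 'all']
```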
release_stats.py CHANGED
@@ -4,9 +4,10 @@ STATS = {
  "version": "1.0.0",
  "date": "",
  "locales": {
- "id": {'reportedSentences': 261, 'buckets': {'dev': 3218, 'invalidated': 2454, 'other': 22787, 'reported': 260, 'test': 3622, 'train': 5043, 'validated': 23132}, 'duration': 196639788, 'clips': 48373, 'splits': {'accent': {'': 1}, 'age': {'': 0.26, 'twenties': 0.39, 'thirties': 0.07, 'teens': 0.26, 'fifties': 0, 'fourties': 0.02}, 'gender': {'': 0.26, 'male': 0.41, 'female': 0.29, 'other': 0.04}}, 'users': 416, 'size': 1253048208, 'checksum': '874e959e2ca1aacc502ff969a3e54de792dd41e4f672ae1fd9d38213f4bf4139', 'avgDurationSecs': 4.065, 'validDurationSecs': 94033.274, 'totalHrs': 54.62, 'validHrs': 26.12},
- "sun": {'reportedSentences': 261, 'buckets': {'dev': 3218, 'invalidated': 2454, 'other': 22787, 'reported': 260, 'test': 3622, 'train': 5043, 'validated': 23132}, 'duration': 196639788, 'clips': 48373, 'splits': {'accent': {'': 1}, 'age': {'': 0.26, 'twenties': 0.39, 'thirties': 0.07, 'teens': 0.26, 'fifties': 0, 'fourties': 0.02}, 'gender': {'': 0.26, 'male': 0.41, 'female': 0.29, 'other': 0.04}}, 'users': 416, 'size': 1253048208, 'checksum': '874e959e2ca1aacc502ff969a3e54de792dd41e4f672ae1fd9d38213f4bf4139', 'avgDurationSecs': 4.065, 'validDurationSecs': 94033.274, 'totalHrs': 54.62, 'validHrs': 26.12}
+ "bal": {'reportedSentences': 261, 'duration': 196639788, 'clips': 48373, 'users': 416, 'size': 1253048208, 'avgDurationSecs': 4.065, 'totalHrs': 54.62},
+ "sun": {'reportedSentences': 261, 'duration': 196639788, 'clips': 48373, 'users': 416, 'size': 1253048208, 'avgDurationSecs': 4.065, 'totalHrs': 54.62},
+ "all": {'reportedSentences': 261, 'duration': 196639788, 'clips': 48373, 'users': 416, 'size': 1253048208, 'avgDurationSecs': 4.065, 'totalHrs': 54.62},
  },
- 'totalDuration': 72782088097, 'totalValidDurationSecs': 53904443, 'totalHrs': 20217, 'totalValidHrs': 14973
+ 'totalDuration': 72782088097, 'totalHrs': 20217
  }
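The per-locale entries are trimmed down to the fields the loading script actually reads (clips, users, size, totalHrs, ...), and the validated-hours totals are dropped along with the validHrs-based config argument in test01.py. A quick way to inspect the trimmed structure is sketched below; it assumes release_stats.py is importable and that 'duration' is in milliseconds (196639788 ms is roughly 54.6 h, which matches totalHrs), an interpretation inferred here rather than stated in the file.

```python
# Hedged sketch for inspecting the trimmed STATS structure.
from release_stats import STATS

for locale, s in STATS["locales"].items():
    hours = s["duration"] / 1000 / 3600  # assumption: 'duration' is in milliseconds
    print(f"{locale}: {s['clips']} clips, {s['users']} speakers, ~{hours:.1f} h (totalHrs={s['totalHrs']})")

print("overall totalHrs:", STATS["totalHrs"])
```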
test01.py CHANGED
@@ -17,15 +17,12 @@

  import csv
  import os
- import urllib

  import datasets
- import requests
  from datasets.utils.py_utils import size_str
- from huggingface_hub import HfApi, HfFolder

- from .languages import LANGUAGES
- from .release_stats import STATS
+ from languages import LANGUAGES
+ from release_stats import STATS

  _CITATION = """\
  """
@@ -36,6 +33,7 @@ _LICENSE = "https://creativecommons.org/publicdomain/zero/1.0/"

  _AUDIO_URL = "https://huggingface.co/datasets/cahya/test01/resolve/main/audio.tgz"

+
  class Test01Config(datasets.BuilderConfig):
  """BuilderConfig for Test01."""

@@ -61,8 +59,7 @@ class Test01Config(datasets.BuilderConfig):


  class Test01(datasets.GeneratorBasedBuilder):
- DEFAULT_CONFIG_NAME = "id"
- DEFAULT_WRITER_BATCH_SIZE = 1000
+ DEFAULT_CONFIG_NAME = "all"

  BUILDER_CONFIGS = [
  Test01Config(
@@ -72,7 +69,6 @@ class Test01(datasets.GeneratorBasedBuilder):
  release_date=STATS["date"],
  num_clips=lang_stats["clips"],
  num_speakers=lang_stats["users"],
- validated_hr=float(lang_stats["validHrs"]) if lang_stats["validHrs"] else None,
  total_hr=float(lang_stats["totalHrs"]) if lang_stats["totalHrs"] else None,
  size_bytes=int(lang_stats["size"]) if lang_stats["size"] else None,
  )
@@ -81,10 +77,10 @@ class Test01(datasets.GeneratorBasedBuilder):

  def _info(self):
  total_languages = len(STATS["locales"])
- total_valid_hours = STATS["totalValidHrs"]
+ total_hours = self.config.total_hr
  description = (
  "LibriVox-Indonesia is a speech dataset generated from LibriVox with only languages from Indonesia."
- f"The dataset currently consists of {total_valid_hours} validated hours of speech "
+ f"The dataset currently consists of {total_hours} hours of speech "
  f" in {total_languages} languages, but more voices and languages are always added."
  )
  features = datasets.Features(
@@ -142,14 +138,15 @@
  lines = (line for line in f)
  utterances = csv.DictReader(lines)
  for row in utterances:
- row["path"] = os.path.join(path_to_clips, row["path"])
- # if data is incomplete, fill with empty values
- for field in data_fields:
- if field not in row:
- row[field] = ""
- metadata[row["path"]] = row
+ if self.config.name == "all" or self.config.name == row["language"]:
+ row["path"] = os.path.join(path_to_clips, row["path"])
+ # if data is incomplete, fill with empty values
+ for field in data_fields:
+ if field not in row:
+ row[field] = ""
+ metadata[row["path"]] = row
  for path, f in archive_iterator:
- if path.endswith(".mp3") and path in metadata:
+ if path in metadata:
  result = dict(metadata[path])
  # set the audio feature and the path to the extracted file
  path = os.path.join(local_extracted_archive, path) if local_extracted_archive else path
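
The change in `_generate_examples` is the core of this commit: instead of always adding every CSV row to `metadata`, a row is now kept only when the selected config is "all" or matches the row's `language` column, and the later `.mp3` extension check becomes redundant because membership in `metadata` already implies a selected clip. Below is a standalone sketch of that filter; the function name, the `rows` argument, and the `data_fields` default are illustrative stand-ins, not names defined in the script.

```python
import os

def filter_rows(rows, config_name, path_to_clips, data_fields=("path", "language")):
    """Hedged sketch of the selection rule added in this commit."""
    metadata = {}
    for row in rows:
        # keep the row for the "all" config or when its language matches the config
        if config_name == "all" or config_name == row["language"]:
            row["path"] = os.path.join(path_to_clips, row["path"])
            # if data is incomplete, fill with empty values
            for field in data_fields:
                if field not in row:
                    row[field] = ""
            metadata[row["path"]] = row
    return metadata

rows = [
    {"path": "jav/clip_0001.mp3", "language": "jav"},
    {"path": "sun/clip_0002.mp3", "language": "sun"},
]
print(filter_rows([dict(r) for r in rows], "sun", "audio"))  # only the Sundanese row
print(filter_rows([dict(r) for r in rows], "all", "audio"))  # both rows
```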
usage.py CHANGED
@@ -2,7 +2,7 @@ from datasets import load_dataset


  def main():
- ds = load_dataset("./test01.py", "sun", ignore_verifications=True)
+ ds = load_dataset("./test01.py", "bal", ignore_verifications=True)
  print(ds)

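
usage.py now exercises the newly added "bal" locale. Since test01.py sets `DEFAULT_CONFIG_NAME = "all"`, the script should also load every language at once when no config name is given, or when "all" is passed explicitly. A hedged variant is sketched below; `ignore_verifications` mirrors usage.py, while newer `datasets` releases replace it with `verification_mode`.

```python
from datasets import load_dataset

# "all" is the new default config, so these two calls should be equivalent.
ds_default = load_dataset("./test01.py", ignore_verifications=True)
ds_all = load_dataset("./test01.py", "all", ignore_verifications=True)
print(ds_all)
```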