Fathan committed on
Commit 4c9558b · 1 Parent(s): bf2ea9b
Files changed (2)
  1. generate_dataset.py +4 -13
  2. su_id_asr.py +43 -60
generate_dataset.py CHANGED
@@ -1,19 +1,10 @@
 from datasets import load_dataset
 
 # Use the prepared dataset builder
-dataset = load_dataset("./su_id_asr.py", split='train', trust_remote_code=True)
-dataset = load_dataset("./su_id_asr.py", split='validation', trust_remote_code=True)
-dataset = load_dataset("./su_id_asr.py", split='test', trust_remote_code=True)
+data_train = load_dataset("./su_id_asr.py", split='train', trust_remote_code=True)
+data_validation = load_dataset("./su_id_asr.py", split='validation', trust_remote_code=True)
+data_test = load_dataset("./su_id_asr.py", split='test', trust_remote_code=True)
 
 # Print a few examples from the train split
-print(dataset[0])
-
-from datasets import load_dataset
-
-# Use the prepared dataset builder
-dataset = load_dataset("./su_id_asr.py", split='train', trust_remote_code=True)
-dataset = load_dataset("./su_id_asr.py", split='validation', trust_remote_code=True)
-dataset = load_dataset("./su_id_asr.py", split='test', trust_remote_code=True)
-
-# Print a few examples from the train split
-print(dataset[0])
+print(data_train[0])
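For reference, each of the three load_dataset calls in the new snippet returns a single split. Calling load_dataset once without a split argument returns a DatasetDict keyed by split name, which is a common alternative; a minimal sketch, assuming the builder script sits in the working directory as above:

from datasets import load_dataset

# One call prepares the builder and returns every split as a DatasetDict
dataset = load_dataset("./su_id_asr.py", trust_remote_code=True)

print(dataset["train"][0])       # first training example
print(dataset["validation"][0])  # first validation example
print(dataset["test"][0])        # first test example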
su_id_asr.py CHANGED
@@ -29,8 +29,6 @@ _CITATION = """\
 _DESCRIPTION = """\
 Sundanese ASR training data set containing ~220K utterances.
 This dataset was collected by Google in Indonesia.
-
-
 """
 
 _HOMEPAGE = "https://indonlp.github.io/nusa-catalogue/card.html?su_id_asr"
@@ -38,13 +36,11 @@ _HOMEPAGE = "https://indonlp.github.io/nusa-catalogue/card.html?su_id_asr"
 _LICENSE = "Attribution-ShareAlike 4.0 International."
 
 _URLs = {
-    "train": "https://univindonesia-my.sharepoint.com/personal/fathan_naufal_office_ui_ac_id/_layouts/15/download.aspx?share=EXnxiynWF0BBhhJS7_1xPT4BInHRARnyP4nqbpLOZwKuLg",
-    "validation": "https://univindonesia-my.sharepoint.com/personal/fathan_naufal_office_ui_ac_id/_layouts/15/download.aspx?SourceUrl=/personal/fathan_naufal_office_ui_ac_id/Documents/ES0-kCylYWtDqlpGq428pJYBK4u83a53Dl_zLmY7tg9ycw?e=PqW0tX",
-    "test": "https://univindonesia-my.sharepoint.com/personal/fathan_naufal_office_ui_ac_id/_layouts/15/download.aspx?SourceUrl=/personal/fathan_naufal_office_ui_ac_id/Documents/EfNmlx62QRVDrBmCRxQvgzEB13AqeJNESYy_pSbjVZV9yg?e=VU2g90",
+    "su_id_asr_train": "https://univindonesia-my.sharepoint.com/:u:/g/personal/fathan_naufal_office_ui_ac_id/EXW5a2p9MudJqZjmg35IfEcBDNF_Oe3kJvbj0VmCLp7HZQ?e=NYplel&download=1",
+    "su_id_asr_dev": "https://univindonesia-my.sharepoint.com/:u:/g/personal/fathan_naufal_office_ui_ac_id/EWUTzuB1uZxMrliLzFDdwjUBzR4L9kG6oeSqDm5kF-i-0w?e=hwhT11&download=1",
+    "su_id_asr_test": "https://univindonesia-my.sharepoint.com/:u:/g/personal/fathan_naufal_office_ui_ac_id/EdDRmrvuER9GsE2W8IgOmroBnhQpj4f4x1JFzl1HJJkLqg?e=Q3aSTg&download=1",
 }
 
-
-
 _SUPPORTED_TASKS = [Tasks.SPEECH_RECOGNITION]
 
 _SOURCE_VERSION = "1.0.0"
@@ -97,71 +93,58 @@ class SuIdASR(datasets.GeneratorBasedBuilder):
         )
 
     def _split_generators(self, dl_manager: datasets.DownloadManager) -> List[datasets.SplitGenerator]:
-        # Download the dataset from three separate links (train, validation, test)
-        train_path = dl_manager.download_and_extract(_URLs["train"])
-        validation_path = dl_manager.download_and_extract(_URLs["validation"])
-        test_path = dl_manager.download_and_extract(_URLs["test"])
-
-        # Divide the dataset into train, validation, and test splits
         return [
             datasets.SplitGenerator(
                 name=datasets.Split.TRAIN,
-                gen_kwargs={"filepath": train_path, "split": "train"},
+                gen_kwargs={"filepath": dl_manager.download_and_extract(_URLs["su_id_asr_train"])},
             ),
             datasets.SplitGenerator(
                 name=datasets.Split.VALIDATION,
-                gen_kwargs={"filepath": validation_path, "split": "validation"},
+                gen_kwargs={"filepath": dl_manager.download_and_extract(_URLs["su_id_asr_dev"])},
             ),
             datasets.SplitGenerator(
                 name=datasets.Split.TEST,
-                gen_kwargs={"filepath": test_path, "split": "test"},
-            ),
+                gen_kwargs={"filepath": dl_manager.download_and_extract(_URLs["su_id_asr_test"])},
+            )
         ]
 
-
-    def _generate_examples(self, filepath: Dict, split: str):
-        """
-        Generate examples from the dataset, supporting multiple splits (train, validation, test).
-        """
+    def _generate_examples(self, filepath: str):
+
         if self.config.schema == "source" or self.config.schema == "seacrowd_sptext":
-            for key, each_filepath in filepath.items():
-
-                tsv_file = os.path.join(each_filepath, "asr_sundanese", "utt_spk_text.tsv")
-
-                with open(tsv_file, "r") as file:
-                    tsv_file = csv.reader(file, delimiter="\t")
-
-                    for line in tsv_file:
-                        audio_id, speaker_id, transcription_text = line[0], line[1], line[2]
-
-                        # Determine the path of the audio file
-                        wav_path = os.path.join(each_filepath, "asr_sundanese", "data", "{}".format(audio_id[:2]), "{}.flac".format(audio_id))
-
-                        if os.path.exists(wav_path):
-                            if self.config.schema == "source":
-                                ex = {
-                                    "id": audio_id,
-                                    "speaker_id": speaker_id,
-                                    "path": wav_path,
-                                    "audio": wav_path,
-                                    "text": transcription_text,
-                                    "split": split,  # Store the split info
-                                }
-                                yield audio_id, ex
-                            elif self.config.schema == "seacrowd_sptext":
-                                ex = {
-                                    "id": audio_id,
-                                    "speaker_id": speaker_id,
-                                    "path": wav_path,
-                                    "audio": wav_path,
-                                    "text": transcription_text,
-                                    "metadata": {
-                                        "speaker_age": None,
-                                        "speaker_gender": None,
-                                    },
-                                    "split": split,  # Store the split info
-                                }
-                                yield audio_id, ex
+
+            tsv_file = os.path.join(filepath, f"asr_sundanese", "utt_spk_text.tsv")
+
+            with open(tsv_file, "r") as file:
+                tsv_file = csv.reader(file, delimiter="\t")
+
+                for line in tsv_file:
+                    audio_id, speaker_id, transcription_text = line[0], line[1], line[2]
+
+                    wav_path = os.path.join(filepath, f"asr_sundanese", "{}.flac".format(audio_id))
+
+                    if os.path.exists(wav_path):
+                        if self.config.schema == "source":
+                            ex = {
+                                "id": audio_id,
+                                "speaker_id": speaker_id,
+                                "path": wav_path,
+                                "audio": wav_path,
+                                "text": transcription_text,
+                            }
+                            yield audio_id, ex
+                        elif self.config.schema == "seacrowd_sptext":
+                            ex = {
+                                "id": audio_id,
+                                "speaker_id": speaker_id,
+                                "path": wav_path,
+                                "audio": wav_path,
+                                "text": transcription_text,
+                                "metadata": {
+                                    "speaker_age": None,
+                                    "speaker_gender": None,
+                                },
+                            }
+                            yield audio_id, ex
         else:
-            raise ValueError(f"Invalid config: {self.config.name}")
+            raise ValueError(f"Invalid config: {self.config.name}")
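A note on why the two hunks above must agree: datasets forwards each SplitGenerator's gen_kwargs dict verbatim as keyword arguments to _generate_examples, so the lone "filepath" key now matches the new _generate_examples(self, filepath: str) signature (the old code also passed "split", which the rewritten method no longer accepts). Schematically, the contract looks like the sketch below; the loop is illustrative, not the library's literal code, and write_example is a hypothetical sink:

# Illustrative sketch of the GeneratorBasedBuilder contract
for split_gen in builder._split_generators(dl_manager):
    # gen_kwargs must match _generate_examples' keyword parameters exactly
    for key, example in builder._generate_examples(**split_gen.gen_kwargs):
        write_example(split_gen.name, key, example)  # hypothetical sink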
 
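The rewritten _generate_examples also assumes a flatter archive layout: utt_spk_text.tsv and the .flac files now sit directly under asr_sundanese/, whereas the old code looked under asr_sundanese/data/<first two characters of audio_id>/. A quick standalone way to confirm an extracted archive matches the new layout, sketched under that assumption (extracted_dir is a hypothetical path, e.g. one result of download_and_extract):

import csv
import os

extracted_dir = "path/to/extracted_split"  # hypothetical local path
tsv_path = os.path.join(extracted_dir, "asr_sundanese", "utt_spk_text.tsv")

missing = []
with open(tsv_path, "r") as f:
    for row in csv.reader(f, delimiter="\t"):
        audio_id = row[0]
        # New layout: the flac files live directly next to the TSV
        if not os.path.exists(os.path.join(extracted_dir, "asr_sundanese", audio_id + ".flac")):
            missing.append(audio_id)

print(f"{len(missing)} utterances listed in the TSV have no matching .flac file")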