holylovenia committed on
Commit
b5cb176
1 Parent(s): 1291016

Upload bloom_lm.py with huggingface_hub

Files changed (1)
  1. bloom_lm.py +247 -0
bloom_lm.py ADDED
@@ -0,0 +1,247 @@
"""
SEA Crowd Data Loader for Bloom LM.
"""
from typing import Dict, Iterator, List, Tuple

import datasets
from datasets.download.download_manager import DownloadManager

from seacrowd.utils import schemas
from seacrowd.utils.configs import SEACrowdConfig
from seacrowd.utils.constants import TASK_TO_SCHEMA, Licenses, Tasks

_CITATION = r"""
@inproceedings{leong-etal-2022-bloom,
    title = "Bloom Library: Multimodal Datasets in 300+ Languages for a Variety of Downstream Tasks",
    author = "Leong, Colin and
      Nemecek, Joshua and
      Mansdorfer, Jacob and
      Filighera, Anna and
      Owodunni, Abraham and
      Whitenack, Daniel",
    editor = "Goldberg, Yoav and
      Kozareva, Zornitsa and
      Zhang, Yue",
    booktitle = "Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing",
    month = dec,
    year = "2022",
    address = "Abu Dhabi, United Arab Emirates",
    publisher = "Association for Computational Linguistics",
    url = "https://aclanthology.org/2022.emnlp-main.590",
    doi = "10.18653/v1/2022.emnlp-main.590",
    pages = "8608--8621",
}
"""

logger = datasets.logging.get_logger(__name__)

# this language config is defined for the SEACrowd dataloader
_LANG_CONFIG = {
    "abc": "Ambala Ayta",
    "ahk": "Akha",
    "bfn": "Bunak",
    "bjn": "Banjar",
    "bkx": "Baikeno",
    "brb": "Brao",
    "brv": "Western Bru",
    "bya": "Batak",
    "bzi": "Bisu",
    "ceb": "Cebuano",
    "cgc": "Kagayanen",
    "cmo": "Central Mnong",
    "ddg": "Fataluku",
    "dmg": "Upper Kinabatangan",
    "dnw": "Western Dani",
    "dtp": "Kadazan Dusun",
    "dtr": "Lotud",
    "enc": "En",
    "fil": "Filipino",
    "gal": "Galolen",
    "hil": "Hiligaynon",
    "hre": "Hre",
    "hro": "Haroi",
    "idt": "Idaté",
    "ilo": "Ilocano",
    "ind": "Indonesian",
    "jra": "Jarai",
    "kak": "Kalanguya",
    "khb": "Lü",
    "khm": "Khmer",
    "kqr": "Kimaragang",
    "krr": "Krung",
    "ksw": "S’gaw Karen",
    "kvt": "Lahta",
    "lao": "Lao",
    "lhu": "Lahu",
    "llg": "Lole",
    "lsi": "Lacid",
    "lwl": "Eastern Lawa",
    "mdr": "Mandar",
    "mgm": "Mambae",
    "mhx": "Lhao Vo",
    "mkz": "Makasae",
    "mnw": "Mon",
    "mqj": "Mamasa",
    "mry": "Mandaya",
    "msb": "Masbatenyo",
    "mya": "Burmese",
    "nod": "Northern Thai",
    "nst": "Tangshang Naga",
    "nxa": "Nauete",
    "nxl": "South Nuaulu",
    "pag": "Pangasinan",
    "pce": "Ruching Palaung",
    "pdu": "Kayan",
    "pea": "Peranakan Indonesian",
    "pmf": "Pamona",
    "psp_ceb": "Filipino Sign Language",
    "sea": "Semai",
    "sgd": "Surigaonon",
    "shn": "Shan",
    "sml": "Central Sama",
    "snl": "Sangil",
    "tdt": "Tetun Dili",
    "tet": "Tetun",
    "tha": "Thai",
    "tkd": "Tukudede",
    "tnt": "Tontemboan",
    "tom": "Tombulu",
    "tpu": "Tampuan",
    "vie": "Vietnamese",
    "war": "Waray-Waray",
    "wms": "Wambon",
    "wnk": "Wanukaka",
    "xmm": "Manado Malay",
    "yet": "Yetfa",
    "yin": "Riang Lai",
    "zlm": "Malay",
}

_LOCAL = False
_LANGUAGES = list(_LANG_CONFIG.keys())

_DATASETNAME = "bloom_lm"
_DESCRIPTION = r"""
This is a Bloom Library dataset developed for the self-supervised language modeling task.
It covers 74 languages indigenous to SEA, amounting to roughly 21K datapoints in total.
The dataset is released under a CC license, and each datapoint has its own specific license attached.
Before using this dataloader, please accept the acknowledgement at https://huggingface.co/datasets/sil-ai/bloom-lm and run `huggingface-cli login` for authentication.
"""

_HOMEPAGE = "https://huggingface.co/datasets/sil-ai/bloom-lm"
_LICENSE = Licenses.CC.value

_URL = "https://huggingface.co/datasets/sil-ai/bloom-lm"
_HF_REMOTE_REF = "/".join(_URL.split("/")[-2:])

_SUPPORTED_TASKS = [Tasks.SELF_SUPERVISED_PRETRAINING]
_SOURCE_VERSION = "0.1.0"
_SEACROWD_VERSION = "2024.06.20"

CONFIG_SUFFIXES_FOR_TASK = [TASK_TO_SCHEMA.get(task).lower() for task in _SUPPORTED_TASKS]


def construct_configs_on_langs() -> List[SEACrowdConfig]:
    """
    Construct a list of SEACrowdConfig objects based on the `_LANGUAGES` variable, and return the list.

    output:
        a list of `SEACrowdConfig` objects based on instantiated init variables
    """

    # set output var
    config_list = []

    # construct zipped arg for config instantiation
    TASKS_AND_CONFIG_SUFFIX_PAIRS = list(zip(_SUPPORTED_TASKS, CONFIG_SUFFIXES_FOR_TASK))

    # implement source schema
    version, config_name_prefix = _SOURCE_VERSION, "source"
    config_list += [
        SEACrowdConfig(
            name=f"{_DATASETNAME}_{_LANG}_{config_name_prefix}",
            version=datasets.Version(version),
            description=f"{_DATASETNAME} {config_name_prefix} schema for language code {_LANG}",
            schema=f"{config_name_prefix}",
            # the actual subset_id in the source dataset for "psp_ceb" is "psp", so the subset_id is defined as follows for loading from the source HF dataset
            subset_id=_LANG if _LANG != "psp_ceb" else "psp",
        )
        for _LANG in _LANGUAGES
    ]

    # implement SEACrowd schema
    version, config_name_prefix = _SEACROWD_VERSION, "seacrowd"
    for task_obj, config_name_suffix in TASKS_AND_CONFIG_SUFFIX_PAIRS:
        config_list += [
            SEACrowdConfig(
                name=f"{_DATASETNAME}_{_LANG}_{config_name_prefix}_{config_name_suffix}",
                version=datasets.Version(version),
                description=f"{_DATASETNAME} {config_name_prefix} schema for {task_obj.name} and language code {_LANG}",
                schema=f"{config_name_prefix}_{config_name_suffix}",
                # the actual subset_id in the source dataset for "psp_ceb" is "psp", so the subset_id is defined as follows for loading from the source HF dataset
                subset_id=_LANG if _LANG != "psp_ceb" else "psp",
            )
            for _LANG in _LANGUAGES
        ]
    return config_list

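# Illustration of the config names this helper produces (assuming TASK_TO_SCHEMA
# maps Tasks.SELF_SUPERVISED_PRETRAINING to "SSP", so the lowercased suffix is "ssp"):
#   bloom_lm_ceb_source        -> source schema for Cebuano
#   bloom_lm_ceb_seacrowd_ssp  -> SEACrowd ssp schema for Cebuano
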
class BloomLMDataset(datasets.GeneratorBasedBuilder):
    """Bloom LM dataset, subsetted from https://huggingface.co/datasets/sil-ai/bloom-lm"""

    # construct configs for every language, for both the source and SEACrowd schemas
    BUILDER_CONFIGS = construct_configs_on_langs()

    def _info(self) -> datasets.DatasetInfo:
        _config_schema_name = self.config.schema
        logger.info(f"Received schema name: {self.config.schema}")
        # source schema
        if _config_schema_name == "source":
            features = datasets.Features(
                {
                    "text": datasets.Value("string"),
                    "title": datasets.Value("string"),
                    "license": datasets.Value("string"),
                    "copyright": datasets.Value("string"),
                    "pageCount": datasets.Value("int32"),
                    "bookInstanceId": datasets.Value("string"),
                    "bookLineage": datasets.Value("string"),
                }
            )

        # ssp schema
        elif _config_schema_name == "seacrowd_ssp":
            features = schemas.ssp_features

        else:
            raise ValueError(f"Received unexpected config schema of {_config_schema_name}!")

        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

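    # Shapes of the records yielded per schema (see _generate_examples below):
    #   source:       all columns declared in the source features above
    #   seacrowd_ssp: {"id": <int>, "text": <string>}
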
    def _split_generators(self, dl_manager: DownloadManager) -> List[datasets.SplitGenerator]:
        # delegate the download to the source dataset on the HF Hub, then keep only non-empty splits
        hf_dset_dict = datasets.load_dataset(_HF_REMOTE_REF, self.config.subset_id)

        return [
            datasets.SplitGenerator(name=datasets.Split(dset_key), gen_kwargs={"hf_dset": dset})
            for dset_key, dset in hf_dset_dict.items()
            if dset.num_rows > 0
        ]

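    # For illustration: with self.config.subset_id == "ceb", the call above is
    # equivalent to datasets.load_dataset("sil-ai/bloom-lm", "ceb"), which is why
    # the dataset's acknowledgement and `huggingface-cli login` are required first.
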
    def _generate_examples(self, hf_dset) -> Iterator[Tuple[int, Dict]]:
        _config_schema_name = self.config.schema

        _idx = 0
        for datapoints in hf_dset:
            # `_idx` is generated manually since the dataset has no `id` field that can serve as a primary key
            if _config_schema_name == "source":
                yield _idx, {colname: datapoints[colname] for colname in self.info.features}

            elif _config_schema_name == "seacrowd_ssp":
                yield _idx, {"id": _idx, "text": datapoints["text"]}

            else:
                raise ValueError(f"Received unexpected config schema of {_config_schema_name}!")

            _idx += 1
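
A minimal usage sketch, assuming the seacrowd package is installed, the acknowledgement at https://huggingface.co/datasets/sil-ai/bloom-lm has been accepted, `huggingface-cli login` has been run, and a `datasets` version that still supports dataset loading scripts is in use:

import datasets

# Load the Cebuano subset in the SEACrowd ssp schema; under the hood the script
# pulls the "ceb" subset of sil-ai/bloom-lm from the HF Hub.
# Depending on the datasets version, trust_remote_code=True may also be required.
bloom_ceb = datasets.load_dataset("bloom_lm.py", name="bloom_lm_ceb_seacrowd_ssp")

for split_name, split in bloom_ceb.items():
    print(split_name, split.num_rows)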