kelechi committed on
Commit
326f806
1 Parent(s): 445b3c2

added load script

Browse files
Files changed (1) hide show
  1. afriberta.py +112 -0
afriberta.py ADDED
@@ -0,0 +1,112 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # coding=utf-8
2
+ # Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the 'License');
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an 'AS IS' BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+
16
+ # Lint as: python3
17
+
18
+ import datasets
19
+
20
# Short human-readable description shown on the dataset Hub page.
_DESCRIPTION = """\
Corpus used for training AfriBERTa models
"""

# BibTeX entry for the AfriBERTa paper (Ogueji et al., MRL 2021).
_CITATION = """\
@inproceedings{ogueji-etal-2021-small,
title = "Small Data? No Problem! Exploring the Viability of Pretrained Multilingual Language Models for Low-resourced Languages",
author = "Ogueji, Kelechi and
Zhu, Yuxin and
Lin, Jimmy",
booktitle = "Proceedings of the 1st Workshop on Multilingual Representation Learning",
month = nov,
year = "2021",
address = "Punta Cana, Dominican Republic",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2021.mrl-1.11",
pages = "116--126",
}
"""

_HOMEPAGE_URL = "https://github.com/keleog/afriberta"
_VERSION = "1.0.0"

# The ten languages covered by the AfriBERTa pre-training corpus.
_LANGUAGES = [
    "afaanoromoo",
    "amharic",
    "gahuza",
    "hausa",
    "igbo",
    "pidgin",
    "somali",
    "swahili",
    "tigrinya",
    "yoruba",
]

# Shared root of all per-language archives on the Hugging Face Hub.
_DOWNLOAD_ROOT = "https://huggingface.co/datasets/castorini/afriberta/resolve/main"

# Per-language download URLs; each language ships a train zip and an eval zip
# (the eval archive is exposed here as the "test" split).
_DATASET_URLS = {
    lang: {
        "train": f"{_DOWNLOAD_ROOT}/{lang}/train.zip",
        "test": f"{_DOWNLOAD_ROOT}/{lang}/eval.zip",
    }
    for lang in _LANGUAGES
}
58
+
59
class MrTyDiCorpus(datasets.GeneratorBasedBuilder):
    """Loading script for the AfriBERTa pre-training corpus.

    One builder config per language in ``_LANGUAGES``; each config exposes a
    "train" and a "test" split, downloaded as zipped plain-text files (the
    remote "test" archive is named ``eval.zip``).

    NOTE(review): the class name looks like a copy-paste leftover from a
    Mr. TyDi loading script. It is kept unchanged so external references
    still resolve, but consider renaming it to e.g. ``AfribertaCorpus``.
    """

    BUILDER_CONFIGS = [
        datasets.BuilderConfig(
            version=datasets.Version(_VERSION),
            name=language,
            description=f"AfriBERTa corpus for {language}.",
        )
        for language in _LANGUAGES
    ]

    def _info(self):
        """Return dataset metadata: each example is an id/text string pair."""
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "id": datasets.Value("string"),
                    "text": datasets.Value("string"),
                },
            ),
            supervised_keys=None,
            homepage=_HOMEPAGE_URL,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Download and extract the archives for this config's language.

        Returns a train and a test ``SplitGenerator``, each pointing its
        ``file_path`` kwarg at the corresponding extracted archive.
        """
        language = self.config.name
        downloaded_files = dl_manager.download_and_extract(_DATASET_URLS[language])

        # Use the canonical Split constants rather than raw strings; they
        # resolve to the same "train"/"test" split names.
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"file_path": downloaded_files["train"]},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={"file_path": downloaded_files["test"]},
            ),
        ]

    def _generate_examples(self, file_path):
        """Yield ``(key, example)`` pairs, one per line of the extracted file.

        The line counter doubles as both the example key and its ``id``
        field. NOTE(review): each ``text`` value keeps its trailing
        newline — strip here if downstream consumers expect clean text.
        """
        with open(file_path, encoding="utf-8") as f:
            for sentence_counter, line in enumerate(f):
                yield sentence_counter, {
                    "id": str(sentence_counter),
                    "text": line,
                }