# coding=utf-8
# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Lint as: python3

import os

import datasets

_DESCRIPTION = """\
The corpus used to pretrain the AfriBERTa multilingual language models.
"""
_CITATION = """\
@inproceedings{ogueji-etal-2021-small,
    title = "Small Data? No Problem! Exploring the Viability of Pretrained Multilingual Language Models for Low-resourced Languages",
    author = "Ogueji, Kelechi  and
      Zhu, Yuxin  and
      Lin, Jimmy",
    booktitle = "Proceedings of the 1st Workshop on Multilingual Representation Learning",
    month = nov,
    year = "2021",
    address = "Punta Cana, Dominican Republic",
    publisher = "Association for Computational Linguistics",
    url = "https://aclanthology.org/2021.mrl-1.11",
    pages = "116--126",
}
"""
_HOMEPAGE_URL = "https://github.com/keleog/afriberta"
_VERSION = "1.0.0"
_LANGUAGES = [
    "afaanoromoo",
    "amharic",
    "gahuza",
    "hausa",
    "igbo",
    "pidgin",
    "somali",
    "swahili",
    "tigrinya",
    "yoruba",
]

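# Per-language URLs of the zipped train/eval splits hosted on the Hugging Face Hub.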
_DATASET_URLS = {
    language: {
        "train": f"https://huggingface.co/datasets/castorini/afriberta-corpus/resolve/main/{language}/train.zip",
        "test": f"https://huggingface.co/datasets/castorini/afriberta-corpus/resolve/main/{language}/eval.zip",
    } for language in _LANGUAGES
}

class AfribertaCorpus(datasets.GeneratorBasedBuilder):
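    """AfriBERTa pretraining corpus, exposed as one builder config per language."""
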
    BUILDER_CONFIGS = [
        datasets.BuilderConfig(
            version=datasets.Version(_VERSION),
            name=language,
            description=f"AfriBERTa corpus for {language}."
        ) for language in _LANGUAGES
    ]

    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "id": datasets.Value("string"),
                    "text": datasets.Value("string"),
                },
            ),
            supervised_keys=None,
            homepage=_HOMEPAGE_URL,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        language = self.config.name
        downloaded_files = dl_manager.download_and_extract(_DATASET_URLS[language])

        splits = [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "file_path": os.path.join(downloaded_files["train"], "train.txt"),
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={
                    "file_path": os.path.join(downloaded_files["test"], "eval.txt"),
                },
            ),
        ]
        return splits

    def _generate_examples(self, file_path):
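        # Stream the file line by line; the running counter serves as both the
        # example key and its string "id". Lines are yielded verbatim,
        # including the trailing newline.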
        with open(file_path, encoding="utf-8") as f:
            for sentence_counter, line in enumerate(f):
                result = (
                    sentence_counter,
                    {
                        "id": str(sentence_counter),
                        "text": line,
                    },
                )
                yield result
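
# Example usage (a minimal sketch; assumes the `datasets` library is installed
# and that this script is resolved from the castorini/afriberta-corpus dataset
# repository on the Hugging Face Hub):
#
#   from datasets import load_dataset
#
#   corpus = load_dataset("castorini/afriberta-corpus", "hausa")
#   print(corpus["train"][0])  # {"id": "0", "text": "..."}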