# coding=utf-8
# Copyright 2020 HuggingFace Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Lint as: python3
"""MasakhaPOS: Part-of-Speech Tagging for Typologically Diverse African Languages"""

import datasets


logger = datasets.logging.get_logger(__name__)


# BibTeX entry for the MasakhaPOS paper (Dione et al., ACL 2023); surfaced
# verbatim through `datasets.DatasetInfo.citation` in `_info` below.
_CITATION = """\
@inproceedings{dione-etal-2023-masakhapos,
    title = "{M}asakha{POS}: Part-of-Speech Tagging for Typologically Diverse {A}frican languages",
    author = "Dione, Cheikh M. Bamba  and Adelani, David Ifeoluwa  and Nabende, Peter  and Alabi, Jesujoba  and Sindane, Thapelo  and Buzaaba, Happy  and Muhammad, Shamsuddeen Hassan  and Emezue, Chris Chinenye  and Ogayo, Perez  and Aremu, Anuoluwapo  and Gitau, Catherine  and Mbaye, Derguene  and Mukiibi, Jonathan  and Sibanda, Blessing  and Dossou, Bonaventure F. P.  and Bukula, Andiswa  and Mabuya, Rooweither  and Tapo, Allahsera Auguste  and Munkoh-Buabeng, Edwin  and Memdjokam Koagne, Victoire  and Ouoba Kabore, Fatoumata  and Taylor, Amelia  and Kalipe, Godson  and Macucwa, Tebogo  and Marivate, Vukosi  and Gwadabe, Tajuddeen  and Elvis, Mboning Tchiaze  and Onyenwe, Ikechukwu  and Atindogbe, Gratien  and Adelani, Tolulope  and Akinade, Idris  and Samuel, Olanrewaju  and Nahimana, Marien  and Musabeyezu, Th{\'e}og{\`e}ne  and Niyomutabazi, Emile  and Chimhenga, Ester  and Gotosa, Kudzai  and Mizha, Patrick  and Agbolo, Apelete  and Traore, Seydou  and Uchechukwu, Chinedu  and Yusuf, Aliyu  and Abdullahi, Muhammad  and Klakow, Dietrich",
    editor = "Rogers, Anna  and
      Boyd-Graber, Jordan  and
      Okazaki, Naoaki",
    booktitle = "Proceedings of the 61st Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)",
    month = jul,
    year = "2023",
    address = "Toronto, Canada",
    publisher = "Association for Computational Linguistics",
    url = "https://aclanthology.org/2023.acl-long.609",
    doi = "10.18653/v1/2023.acl-long.609",
    pages = "10883--10900",
    abstract = "In this paper, we present AfricaPOS, the largest part-of-speech (POS) dataset for 20 typologically diverse African languages. We discuss the challenges in annotating POS for these languages using the universal dependencies (UD) guidelines. We conducted extensive POS baseline experiments using both conditional random field and several multilingual pre-trained language models. We applied various cross-lingual transfer models trained with data available in the UD. Evaluating on the AfricaPOS dataset, we show that choosing the best transfer language(s) in both single-source and multi-source setups greatly improves the POS tagging performance of the target languages, in particular when combined with parameter-fine-tuning methods. Crucially, transferring knowledge from a language that matches the language family and morphosyntactic properties seems to be more effective for POS tagging in unseen languages.",
}
"""

# Text shown on the dataset hub page. Fixes vs. the previous revision:
# mojibake "swą" -> "swa", missing "(pcm)" code for Nigerian Pidgin,
# "chShona" -> "chiShona", and "ten languages" -> 20 (matching the list).
_DESCRIPTION = """\
MasakhaPOS is the largest publicly available high-quality dataset for part-of-speech (POS) tagging in 20 African languages. The languages covered are:
- Bambara (bam)
- Ghomala (bbj)
- Ewe (ewe)
- Fon (fon)
- Hausa (hau)
- Igbo (ibo)
- Kinyarwanda (kin)
- Luganda (lug)
- Dholuo (luo)
- Mossi (mos)
- Chichewa (nya)
- Nigerian Pidgin (pcm)
- chiShona (sna)
- Kiswahili (swa)
- Setswana (tsn)
- Twi (twi)
- Wolof (wol)
- isiXhosa (xho)
- Yorùbá (yor)
- isiZulu (zul)

The train/validation/test sets are available for all 20 languages.

For more details see https://aclanthology.org/2023.acl-long.609/
"""
# Raw data lives in per-language folders of the masakhane-pos repository;
# each folder holds CoNLL-style train/dev/test files.
_URL = "https://github.com/masakhane-io/masakhane-pos/raw/main/data/"
_TRAINING_FILE = "train.txt"
_DEV_FILE = "dev.txt"
_TEST_FILE = "test.txt"

class MasakhaposConfig(datasets.BuilderConfig):
    """Configuration for one MasakhaPOS language subset."""

    def __init__(self, **kwargs):
        """Create a config for a single language.

        Args:
          **kwargs: keyword arguments passed through unchanged to
            ``datasets.BuilderConfig`` (e.g. ``name``, ``version``,
            ``description``).
        """
        super().__init__(**kwargs)


class Masakhapos(datasets.GeneratorBasedBuilder):
    """MasakhaPOS dataset builder: one config per language, CoNLL-style POS data."""

    BUILDER_CONFIGS = [
        MasakhaposConfig(name="bam", version=datasets.Version("1.0.0"), description="Masakhapos Bambara dataset"),
        MasakhaposConfig(name="bbj", version=datasets.Version("1.0.0"), description="Masakhapos Ghomala dataset"),
        MasakhaposConfig(name="ewe", version=datasets.Version("1.0.0"), description="Masakhapos Ewe dataset"),
        MasakhaposConfig(name="fon", version=datasets.Version("1.0.0"), description="Masakhapos Fon dataset"),
        MasakhaposConfig(name="hau", version=datasets.Version("1.0.0"), description="Masakhapos Hausa dataset"),
        MasakhaposConfig(name="ibo", version=datasets.Version("1.0.0"), description="Masakhapos Igbo dataset"),
        MasakhaposConfig(name="kin", version=datasets.Version("1.0.0"), description="Masakhapos Kinyarwanda dataset"),
        MasakhaposConfig(name="lug", version=datasets.Version("1.0.0"), description="Masakhapos Luganda dataset"),
        MasakhaposConfig(name="luo", version=datasets.Version("1.0.0"), description="Masakhapos Luo dataset"),
        MasakhaposConfig(name="mos", version=datasets.Version("1.0.0"), description="Masakhapos Mossi dataset"),
        # Fixed stray backtick in the description string ("Chichewa`").
        MasakhaposConfig(name="nya", version=datasets.Version("1.0.0"), description="Masakhapos Chichewa dataset"),
        MasakhaposConfig(
            name="pcm", version=datasets.Version("1.0.0"), description="Masakhapos Nigerian-Pidgin dataset"
        ),
        MasakhaposConfig(name="sna", version=datasets.Version("1.0.0"), description="Masakhapos Shona dataset"),
        MasakhaposConfig(name="swa", version=datasets.Version("1.0.0"), description="Masakhapos Swahili dataset"),
        MasakhaposConfig(name="tsn", version=datasets.Version("1.0.0"), description="Masakhapos Setswana dataset"),
        MasakhaposConfig(name="twi", version=datasets.Version("1.0.0"), description="Masakhapos Twi dataset"),
        MasakhaposConfig(name="wol", version=datasets.Version("1.0.0"), description="Masakhapos Wolof dataset"),
        MasakhaposConfig(name="xho", version=datasets.Version("1.0.0"), description="Masakhapos Xhosa dataset"),
        MasakhaposConfig(name="yor", version=datasets.Version("1.0.0"), description="Masakhapos Yoruba dataset"),
        MasakhaposConfig(name="zul", version=datasets.Version("1.0.0"), description="Masakhapos Zulu dataset"),
    ]

    def _info(self):
        """Return dataset metadata: token/UPOS features, homepage, and citation."""
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "id": datasets.Value("string"),
                    "tokens": datasets.Sequence(datasets.Value("string")),
                    # NOTE: label order is load-bearing — it fixes the integer
                    # class ids, so it must not be reordered.
                    "upos": datasets.Sequence(
                        datasets.features.ClassLabel(
                            names=[
                                "NOUN",
                                "PUNCT",
                                "ADP",
                                "NUM",
                                "SYM",
                                "SCONJ",
                                "ADJ",
                                "PART",
                                "DET",
                                "CCONJ",
                                "PROPN",
                                "PRON",
                                "X",
                                "_",
                                "ADV",
                                "INTJ",
                                "VERB",
                                "AUX",
                            ]
                        )
                    ),
                }
            ),
            supervised_keys=None,
            homepage="https://aclanthology.org/2023.acl-long.609/",
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Download this config's train/dev/test files and return the three splits."""
        urls_to_download = {
            "train": f"{_URL}{self.config.name}/{_TRAINING_FILE}",
            "dev": f"{_URL}{self.config.name}/{_DEV_FILE}",
            "test": f"{_URL}{self.config.name}/{_TEST_FILE}",
        }
        downloaded_files = dl_manager.download_and_extract(urls_to_download)

        return [
            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": downloaded_files["train"]}),
            datasets.SplitGenerator(name=datasets.Split.VALIDATION, gen_kwargs={"filepath": downloaded_files["dev"]}),
            datasets.SplitGenerator(name=datasets.Split.TEST, gen_kwargs={"filepath": downloaded_files["test"]}),
        ]

    def _generate_examples(self, filepath):
        """Yield (guid, example) pairs from a CoNLL-style file.

        Each non-blank line holds a whitespace-separated token and its POS tag
        (tag is the last field); a blank line ends a sentence.
        """
        logger.info("⏳ Generating examples from = %s", filepath)
        with open(filepath, encoding="utf-8") as f:
            guid = 0
            tokens = []
            pos_tags = []
            for line in f:
                if line.startswith("-DOCSTART-"):
                    # Document delimiters carry no annotations.
                    continue
                if not line.strip():
                    # Sentence boundary. Using strip() also tolerates
                    # whitespace-only lines, which previously raised
                    # IndexError on splits[0] because "".split() == [].
                    if tokens:
                        yield guid, {
                            "id": str(guid),
                            "tokens": tokens,
                            "upos": pos_tags,
                        }
                        guid += 1
                        tokens = []
                        pos_tags = []
                else:
                    # Masakhapos tokens are space separated; tag is last field.
                    splits = line.strip().split()
                    tokens.append(splits[0])
                    pos_tags.append(splits[-1])
            # Emit the final sentence if the file lacks a trailing blank line.
            if tokens:
                yield guid, {
                    "id": str(guid),
                    "tokens": tokens,
                    "upos": pos_tags,
                }