Multilinguality: multilingual
Size Categories: 10K<n<100K
Language Creators: found
Annotations Creators: no-annotation
Source Datasets: original
License:
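
For quick reference, a minimal loading sketch; it assumes this script is available under the dataset id "ilist" (adjust the identifier, or pass a local path to the script file, if that assumption does not hold):

    from datasets import load_dataset

    dataset = load_dataset("ilist")  # assumed Hub id; a local script path also works
    print(dataset["train"][0])
    # e.g. {'language_id': 4, 'text': '...'}  -- 4 is the ClassLabel id for "HIN"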
# coding=utf-8
# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Indo-Aryan Language Identification Shared Task Dataset"""


import datasets
from datasets.tasks import TextClassification


_CITATION = r"""\
@inproceedings{zampieri-etal-2018-language,
    title = "Language Identification and Morphosyntactic Tagging: The Second {V}ar{D}ial Evaluation Campaign",
    author = {Zampieri, Marcos  and
      Malmasi, Shervin  and
      Nakov, Preslav  and
      Ali, Ahmed  and
      Shon, Suwon  and
      Glass, James  and
      Scherrer, Yves  and
      Samard{\v{z}}i{\'c}, Tanja  and
      Ljube{\v{s}}i{\'c}, Nikola  and
      Tiedemann, J{\"o}rg  and
      van der Lee, Chris  and
      Grondelaers, Stefan  and
      Oostdijk, Nelleke  and
      Speelman, Dirk  and
      van den Bosch, Antal  and
      Kumar, Ritesh  and
      Lahiri, Bornini  and
      Jain, Mayank},
    booktitle = "Proceedings of the Fifth Workshop on {NLP} for Similar Languages, Varieties and Dialects ({V}ar{D}ial 2018)",
    month = aug,
    year = "2018",
    address = "Santa Fe, New Mexico, USA",
    publisher = "Association for Computational Linguistics",
    url = "https://aclanthology.org/W18-3901",
    pages = "1--17",
}
"""

_DESCRIPTION = """\
This dataset was introduced in a shared task aimed at identifying five closely related languages
of the Indo-Aryan language family: Hindi (also known as Khari Boli), Braj Bhasha, Awadhi,
Bhojpuri, and Magahi.
"""

_URL = "https://raw.githubusercontent.com/kmi-linguistics/vardial2018/master/dataset/{}.txt"
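# e.g. _URL.format("train") resolves to
# https://raw.githubusercontent.com/kmi-linguistics/vardial2018/master/dataset/train.txt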


class Ilist(datasets.GeneratorBasedBuilder):
    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "language_id": datasets.ClassLabel(names=["AWA", "BRA", "MAG", "BHO", "HIN"]),
                    "text": datasets.Value("string"),
                }
            ),
            supervised_keys=None,
            homepage="https://github.com/kmi-linguistics/vardial2018",
            citation=_CITATION,
            task_templates=[TextClassification(text_column="text", label_column="language_id")],
        )

    def _split_generators(self, dl_manager):
        filepaths = dl_manager.download_and_extract(
            {
                "train": _URL.format("train"),
                "test": _URL.format("gold"),
                "dev": _URL.format("dev"),
            }
        )

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                # These kwargs will be passed to _generate_examples
                gen_kwargs={
                    "filepath": filepaths["train"],
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                # These kwargs will be passed to _generate_examples
                gen_kwargs={
                    "filepath": filepaths["test"],
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                # These kwargs will be passed to _generate_examples
                gen_kwargs={
                    "filepath": filepaths["dev"],
                },
            ),
        ]

    def _generate_examples(self, filepath):
        """Yields examples."""
        # Each line in the source files holds one example as "<text>\t<language-code>".
        with open(filepath, "r", encoding="utf-8") as file:
            for idx, row in enumerate(file):
                row = row.strip("\n").split("\t")
                if len(row) == 1:
                    # Skip lines without a tab separator (e.g. blank or malformed lines).
                    continue
                yield idx, {"language_id": row[1], "text": row[0]}