holylovenia committed on
Commit
5a2119a
1 Parent(s): 8168aa5

Upload udhr_lid.py with huggingface_hub

Files changed (1)
  1. udhr_lid.py +160 -0
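The commit message says the file was uploaded with huggingface_hub. As context, a minimal sketch of how such an upload is typically done is below; the repo id and local path are hypothetical placeholders, not details taken from this commit.

# Minimal upload sketch using huggingface_hub (assumed workflow;
# the repo_id below is a hypothetical placeholder).
from huggingface_hub import HfApi

api = HfApi()  # picks up the token stored by `huggingface-cli login`
api.upload_file(
    path_or_fileobj="udhr_lid.py",  # local script to upload
    path_in_repo="udhr_lid.py",     # destination path inside the repo
    repo_id="your-org/udhr_lid",    # hypothetical dataset repo
    repo_type="dataset",
    commit_message="Upload udhr_lid.py with huggingface_hub",
)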
udhr_lid.py ADDED
@@ -0,0 +1,160 @@
# coding=utf-8
# Copyright 2022 The HuggingFace Datasets Authors and the current dataset script contributor.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from pathlib import Path
from typing import Dict, List, Tuple

import datasets
import pandas as pd

from seacrowd.utils import schemas
from seacrowd.utils.configs import SEACrowdConfig
from seacrowd.utils.constants import Licenses, Tasks

_CITATION = r"""\
@inproceedings{kargaran2023glotlid,
    title={{GlotLID}: Language Identification for Low-Resource Languages},
    author={Kargaran, Amir Hossein and Imani, Ayyoob and Yvon, Fran{\c{c}}ois and Sch{\"u}tze, Hinrich},
    booktitle={The 2023 Conference on Empirical Methods in Natural Language Processing},
    year={2023},
    url={https://openreview.net/forum?id=dl4e3EBz5j}
}
"""

# We follow the ISO 639-3 language codes (https://iso639-3.sil.org/code_tables/639/data)
_LANGUAGES = [
    "sun",
    "ace",
    "mad",
    "lao",
    "cfm",
    "hnj",
    "min",
    "zlm",
    "tha",
    "blt",
    "hni",
    "jav",
    "tdt",
    "cnh",
    "khm",
    "ban",
    "ind",
    "mya",
    "ccp",
    "duu",
    "tet",
    "kkh",
    "bug",
    "vie",
]

_LOCAL = False

_DATASETNAME = "udhr_lid"

_DESCRIPTION = """\
The UDHR-LID dataset is a refined version of the Universal Declaration of Human Rights, tailored for language identification tasks.
It removes filler texts, repeated phrases, and inaccuracies from the original UDHR, focusing only on cleaned paragraphs.
Each entry in the dataset is associated with a specific language, providing long, linguistically rich content.
This dataset is particularly useful for non-parallel, language-specific text analysis in natural language processing.
"""

_HOMEPAGE = "https://huggingface.co/datasets/cis-lmu/udhr-lid"

_LICENSE = Licenses.CC0_1_0.value

_URL = "https://huggingface.co/datasets/cis-lmu/udhr-lid/raw/main/udhr-lid.csv"

_SUPPORTED_TASKS = [Tasks.LANGUAGE_IDENTIFICATION]

_SOURCE_VERSION = "1.0.0"

_SEACROWD_VERSION = "2024.06.20"


class UDHRLID(datasets.GeneratorBasedBuilder):
    SOURCE_VERSION = datasets.Version(_SOURCE_VERSION)
    SEACROWD_VERSION = datasets.Version(_SEACROWD_VERSION)

    BUILDER_CONFIGS = [
        SEACrowdConfig(
            name=f"{_DATASETNAME}_source",
            version=SOURCE_VERSION,
            description=f"{_DATASETNAME} source schema",
            schema="source",
            subset_id=f"{_DATASETNAME}",
        ),
        SEACrowdConfig(
            name=f"{_DATASETNAME}_seacrowd_text",
            version=SEACROWD_VERSION,
            description=f"{_DATASETNAME} SEACrowd schema",
            schema="seacrowd_text",
            subset_id=f"{_DATASETNAME}",
        ),
    ]

    DEFAULT_CONFIG_NAME = f"{_DATASETNAME}_source"

    def _info(self) -> datasets.DatasetInfo:
        if self.config.schema == "source":
            features = datasets.Features(
                {
                    "id": datasets.Value("string"),
                    "sentence": datasets.Value("string"),
                    "iso639-3": datasets.Value("string"),
                    "iso15924": datasets.Value("string"),
                    "language": datasets.Value("string"),
                }
            )
        elif self.config.schema == "seacrowd_text":
            features = schemas.text_features(_LANGUAGES)
        else:
            raise NotImplementedError(f"Schema '{self.config.schema}' is not defined.")

        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager: datasets.DownloadManager) -> List[datasets.SplitGenerator]:
        """Returns SplitGenerators. The dataset ships as a single CSV, exposed as a TEST split."""
        data_path = dl_manager.download(_URL)

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={
                    "filepath": data_path,
                },
            ),
        ]

    def _generate_examples(self, filepath: Path) -> Tuple[int, Dict]:
        """Yields examples, keeping only rows whose language is in _LANGUAGES."""
        df = pd.read_csv(filepath)

        for i, row in df.iterrows():
            if row["iso639-3"] in _LANGUAGES:
                if self.config.schema == "source":
                    yield i, {
                        "id": str(i),
                        "sentence": row["sentence"],
                        "iso639-3": row["iso639-3"],
                        "iso15924": row["iso15924"],
                        "language": row["language"],
                    }
                elif self.config.schema == "seacrowd_text":
                    yield i, {"id": str(i), "text": row["sentence"], "label": row["iso639-3"]}
                else:
                    raise ValueError(f"Invalid config: {self.config.name}")
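For reference, a minimal sketch of loading this script through the datasets library, assuming it is hosted in a Hub dataset repo; the repo id below is a hypothetical placeholder, and recent datasets versions require trust_remote_code=True for script-based datasets like this one.

# Minimal usage sketch (assumption: "your-org/udhr_lid" is a
# hypothetical repo hosting udhr_lid.py, not this dataset's real repo).
import datasets

dset = datasets.load_dataset(
    "your-org/udhr_lid",            # hypothetical repo id
    name="udhr_lid_seacrowd_text",  # or "udhr_lid_source"
    trust_remote_code=True,         # needed for script-based datasets
)
print(dset["test"][0])  # the script exposes a single TEST split

With the seacrowd_text config, each record has the shape {id, text, label}, where label is an ISO 639-3 code from _LANGUAGES; the source config instead keeps the original CSV columns (sentence, iso639-3, iso15924, language).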