holylovenia committed on
Commit
e0f3f6d
1 Parent(s): e78cde4

Upload wikiann.py with huggingface_hub

Files changed (1)
  1. wikiann.py +197 -0
wikiann.py ADDED
from pathlib import Path
from typing import List

import datasets
from datasets import NamedSplit

from nusacrowd.utils import schemas
from nusacrowd.utils.configs import NusantaraConfig
from nusacrowd.utils.constants import (DEFAULT_NUSANTARA_VIEW_NAME,
                                       DEFAULT_SOURCE_VIEW_NAME, Tasks)

_DATASETNAME = "wikiann"
_SOURCE_VIEW_NAME = DEFAULT_SOURCE_VIEW_NAME
_UNIFIED_VIEW_NAME = DEFAULT_NUSANTARA_VIEW_NAME

_LANGUAGES = ["ind", "eng", "jav", "min", "sun", "ace", "mly", "map-bms"]
_LOCAL = False
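# Note: _LANGUAGES above uses ISO 639-3 codes, and _LOCAL = False marks the
# corpus as publicly downloadable rather than supplied by the user (NusaCrowd
# loader conventions, noted here for readers unfamiliar with them).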
_CITATION = """\
@inproceedings{pan-etal-2017-cross,
    title = "Cross-lingual Name Tagging and Linking for 282 Languages",
    author = "Pan, Xiaoman  and
      Zhang, Boliang  and
      May, Jonathan  and
      Nothman, Joel  and
      Knight, Kevin  and
      Ji, Heng",
    booktitle = "Proceedings of the 55th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)",
    month = jul,
    year = "2017",
    address = "Vancouver, Canada",
    publisher = "Association for Computational Linguistics",
    url = "https://www.aclweb.org/anthology/P17-1178",
    doi = "10.18653/v1/P17-1178",
    pages = "1946--1958",
    abstract = "The ambitious goal of this work is to develop a cross-lingual name tagging and linking framework
    for 282 languages that exist in Wikipedia. Given a document in any of these languages, our framework is able
    to identify name mentions, assign a coarse-grained or fine-grained type to each mention, and link it to
    an English Knowledge Base (KB) if it is linkable. We achieve this goal by performing a series of
    new KB mining methods: generating {``}silver-standard{''} annotations by
    transferring annotations from English to other languages through cross-lingual links and KB properties,
    refining annotations through self-training and topic selection,
    deriving language-specific morphology features from anchor links, and mining word translation pairs from
    cross-lingual links. Both name tagging and linking results for 282 languages are promising
    on Wikipedia data and on non-Wikipedia data.",
}
@inproceedings{rahimi-etal-2019-massively,
    title = "Massively Multilingual Transfer for {NER}",
    author = "Rahimi, Afshin  and
      Li, Yuan  and
      Cohn, Trevor",
    booktitle = "Proceedings of the 57th Annual Meeting of the Association for Computational Linguistics",
    month = jul,
    year = "2019",
    address = "Florence, Italy",
    publisher = "Association for Computational Linguistics",
    url = "https://www.aclweb.org/anthology/P19-1015",
    pages = "151--164",
}
"""

_DESCRIPTION = """\
The wikiann dataset contains NER tags with labels O (0), B-PER (1), I-PER (2), B-ORG (3), I-ORG (4), B-LOC (5), and I-LOC (6). By default, the Indonesian subset is used.
WikiANN (sometimes called PAN-X) is a multilingual named entity recognition dataset consisting of Wikipedia articles
annotated with LOC (location), PER (person), and ORG (organisation)
tags in the IOB2 format. This version corresponds to the balanced train, dev, and test splits of
Rahimi et al. (2019), and uses the following subsets from the original WikiANN corpus:

Language      WikiAnn code  ISO 639-3
Indonesian    id            ind
Javanese      jv            jav
Minangkabau   min           min
Sundanese     su            sun
Acehnese      ace           ace
Malay         ms            mly
Banyumasan    map-bms       map-bms
"""

_HOMEPAGE = "https://github.com/afshinrahimi/mmner"

_LICENSE = "Apache-2.0 license"

_URLs = {
    "wikiann": "https://s3.amazonaws.com/datasets.huggingface.co/wikiann/1.1.0/panx_dataset.zip",
}

_SUPPORTED_TASKS = [Tasks.NAMED_ENTITY_RECOGNITION]

_SOURCE_VERSION = "1.1.0"
_NUSANTARA_VERSION = "1.0.0"


def nusantara_config_constructor(lang, schema, version):
    if lang == "":
        raise ValueError(f"Invalid lang {lang}")

    if schema not in ("source", "nusantara_seq_label"):
        raise ValueError(f"Invalid schema: {schema}")

    return NusantaraConfig(
        name="wikiann_{lang}_{schema}".format(lang=lang, schema=schema),
        version=datasets.Version(version),
        description="wikiann with {schema} schema for {lang} language".format(lang=lang, schema=schema),
        schema=schema,
        subset_id="wikiann",
    )
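
# For example, nusantara_config_constructor("ind", "source", "1.1.0") yields a
# config named "wikiann_ind_source"; BUILDER_CONFIGS below builds one such
# config per language in LANGUAGES_MAP for each of the two schemas.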


LANGUAGES_MAP = {"eng": "english", "ind": "indonesian", "jav": "javanese", "min": "minangkabau", "sun": "sundanese", "ace": "acehnese", "mly": "malay", "map_bms": "banyumasan"}  # actual WikiAnn code is map-bms
LANG_CODES = {"eng": "en", "ind": "id", "jav": "jv", "min": "min", "sun": "su", "ace": "ace", "mly": "ms", "map_bms": "map-bms"}


class WikiAnnDataset(datasets.GeneratorBasedBuilder):
    """wikiann is an NER tagging dataset consisting of Wikipedia articles annotated with LOC, PER, and ORG tags
    for multiple Indonesian languages. If the language is not specified, it loads the Indonesian subset."""

    label_classes = ["B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC", "O"]

    BUILDER_CONFIGS = [nusantara_config_constructor(lang, "source", _SOURCE_VERSION) for lang in LANGUAGES_MAP] + [nusantara_config_constructor(lang, "nusantara_seq_label", _NUSANTARA_VERSION) for lang in LANGUAGES_MAP]

    DEFAULT_CONFIG_NAME = "wikiann_ind_source"

    def _info(self):
        if self.config.schema == "source":
            features = datasets.Features({"index": datasets.Value("string"), "tokens": [datasets.Value("string")], "ner_tag": [datasets.Value("string")]})
        elif self.config.schema == "nusantara_seq_label":
            features = schemas.seq_label_features(self.label_classes)

        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def get_lang(self, name):
        return name.removesuffix("_source").removesuffix("_nusantara_seq_label").removeprefix("wikiann_")
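
    # For example, get_lang("wikiann_ind_source") returns "ind". Note that
    # str.removesuffix / str.removeprefix require Python >= 3.9.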

    def _split_generators(self, dl_manager: datasets.DownloadManager) -> List[datasets.SplitGenerator]:
        path = Path(dl_manager.download_and_extract(_URLs["wikiann"]))
        lang = LANG_CODES[self.get_lang(self.config.name)]
        wikiann_dl_dir = path / f"{lang}.tar.gz"
        return [
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={"split": "dev", "filepath": dl_manager.iter_archive(wikiann_dl_dir)},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={"split": "test", "filepath": dl_manager.iter_archive(wikiann_dl_dir)},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"split": "train", "filepath": dl_manager.iter_archive(wikiann_dl_dir)},
            ),
            datasets.SplitGenerator(
                name=NamedSplit("extra"),
                gen_kwargs={"split": "extra", "filepath": dl_manager.iter_archive(wikiann_dl_dir)},
            ),
        ]
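
    # The panx_dataset.zip archive contains one {lang}.tar.gz file per language;
    # iter_archive streams its members as (member_name, file_object) pairs, and
    # the member names are expected to match the split names ("train", "dev",
    # "test", "extra").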

    def _generate_examples(self, filepath, split):
        """Based on https://github.com/huggingface/datasets/blob/main/datasets/wikiann/wikiann.py"""
        # filepath is the (name, file-object) iterator produced by
        # dl_manager.iter_archive in _split_generators.
        fps = filepath
        tokens = []
        ner_tags = []
        langs = []
        guid_index = 0
        for k, file in fps:
            if k == split:
                for line in file:
                    line = line.decode("utf-8")
                    if line == "" or line == "\n":
                        if tokens:
                            if self.config.schema == "source":
                                yield guid_index, {"index": str(guid_index), "tokens": tokens, "ner_tag": ner_tags}
                            elif self.config.schema == "nusantara_seq_label":
                                yield guid_index, {"id": str(guid_index), "tokens": tokens, "labels": ner_tags}
                            else:
                                raise ValueError(f"Invalid config: {self.config.name}")
                            guid_index += 1
                            tokens = []
                            ner_tags = []
                            langs = []
                    else:
                        # wikiann data is tab separated
                        splits = line.split("\t")
                        # strip out the "en:"-style language prefix from each token
                        langs.append(splits[0].split(":")[0])
                        tokens.append(":".join(splits[0].split(":")[1:]))
                        if len(splits) > 1:
                            ner_tags.append(splits[-1].replace("\n", ""))
                        else:
                            # examples have no label in the test set
                            ner_tags.append("O")
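
For context, here is a minimal usage sketch. It assumes the script above is saved locally as wikiann.py with the nusacrowd package installed; recent versions of the datasets library may additionally require trust_remote_code=True for script-based datasets:

    import datasets

    # Load the Indonesian subset in the source schema; use
    # "wikiann_ind_nusantara_seq_label" for the unified NusaCrowd schema.
    wikiann = datasets.load_dataset("wikiann.py", name="wikiann_ind_source")

    # Each example holds an index, its tokens, and the IOB2 tags.
    print(wikiann["train"][0])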