holylovenia committed on
Commit
327b1cf
1 Parent(s): 954a6bb

Upload indocoref.py with huggingface_hub

Files changed (1)
  1. indocoref.py +248 -0
indocoref.py ADDED
@@ -0,0 +1,248 @@
+ import os
+ from pathlib import Path
+ from typing import Any, Dict, List, Tuple
+
+ try:
+     from typing import TypedDict
+ except ImportError:  # Python < 3.8: fall back to the typing_extensions backport
+     from typing_extensions import TypedDict
+
+ import datasets
+
+ from nusacrowd.nusa_datasets.indocoref.utils.text_preprocess import \
+     TextPreprocess
+ from nusacrowd.utils import schemas
+ from nusacrowd.utils.configs import NusantaraConfig
+ from nusacrowd.utils.constants import Tasks
+
+ _CITATION = """\
+ @inproceedings{artari-etal-2021-multi,
+     title = {A Multi-Pass Sieve Coreference Resolution for {I}ndonesian},
+     author = {Artari, Valentina Kania Prameswara and Mahendra, Rahmad and Jiwanggi, Meganingrum Arista and Anggraito, Adityo and Budi, Indra},
+     year = 2021,
+     month = sep,
+     booktitle = {Proceedings of the International Conference on Recent Advances in Natural Language Processing (RANLP 2021)},
+     publisher = {INCOMA Ltd.},
+     address = {Held Online},
+     pages = {79--85},
+     url = {https://aclanthology.org/2021.ranlp-1.10},
+     abstract = {Coreference resolution is an NLP task to find out whether the set of referring expressions belong to the same concept in discourse. A multi-pass sieve is a deterministic coreference model that implements several layers of sieves, where each sieve takes a pair of correlated mentions from a collection of non-coherent mentions. The multi-pass sieve is based on the principle of high precision, followed by increased recall in each sieve. In this work, we examine the portability of the multi-pass sieve coreference resolution model to the Indonesian language. We conduct the experiment on 201 Wikipedia documents and the multi-pass sieve system yields 72.74{\%} of MUC F-measure and 52.18{\%} of BCUBED F-measure.}
+ }
+ """
+
+ _LOCAL = False
+ _LANGUAGES = ["ind"]  # We follow the ISO 639-3 language codes (https://iso639-3.sil.org/code_tables/639/data)
+ _DATASETNAME = "indocoref"
+ _DESCRIPTION = """\
+ The dataset contains articles from Wikipedia Bahasa Indonesia which fulfill these conditions:
+ - The pages contain many noun phrases, which the authors subjectively pick: (i) fictional plots, e.g., subtitles for films,
+   TV show episodes, and novel stories; (ii) biographies (incl. fictional characters); and (iii) historical or important events.
+ - The pages contain significant variation of pronouns and named entities. We count the number of first-, second-, and
+   third-person pronouns and clitic pronouns in each document by applying string matching. We examine the number of named
+   entities using the Stanford CoreNLP NER Tagger (Manning et al., 2014) with a model trained on the Indonesian corpus
+   taken from Alfina et al. (2016).
+ The Wikipedia texts have lengths of 500 to 2,000 words.
+ We sample 201 pages from the subset of filtered Wikipedia pages. We hire five annotators who are undergraduate students
+ in a Linguistics department and native speakers of Indonesian. Annotation is carried out using the Script d'Annotation
+ des Chaînes de Référence (SACR), a web-based coreference resolution annotation tool developed by Oberlé (2018).
+ From the 201 texts, there are 16,460 mentions tagged by the annotators.
+ """
+
+ _HOMEPAGE = "https://github.com/valentinakania/indocoref/"
+ _LICENSE = "MIT"
+ _URLS = {
+     _DATASETNAME: "https://github.com/valentinakania/indocoref/archive/refs/heads/main.zip",
+ }
+ _SUPPORTED_TASKS = [Tasks.COREFERENCE_RESOLUTION]
+ # Does not seem to have versioning
+ _SOURCE_VERSION = "1.0.0"
+ _NUSANTARA_VERSION = "1.0.0"
+
+
+ class Indocoref(datasets.GeneratorBasedBuilder):
+     """A collection of 201 curated articles from Wikipedia Bahasa Indonesia with coreference annotations."""
+
+     SOURCE_VERSION = datasets.Version(_SOURCE_VERSION)
+     NUSANTARA_VERSION = datasets.Version(_NUSANTARA_VERSION)
+
+     BUILDER_CONFIGS = [
+         NusantaraConfig(
+             name="indocoref_source",
+             version=SOURCE_VERSION,
+             description="Indocoref source schema",
+             schema="source",
+             subset_id="indocoref",
+         ),
+         NusantaraConfig(
+             name="indocoref_nusantara_kb",
+             version=NUSANTARA_VERSION,
+             description="Indocoref Nusantara schema",
+             schema="nusantara_kb",
+             subset_id="indocoref",
+         ),
+     ]
+
+     DEFAULT_CONFIG_NAME = "indocoref_source"
+
+     def _info(self) -> datasets.DatasetInfo:
+         # The dataset does not really come with a schema; the features here come from the returned value
+         # of the accompanying utils files.
+         if self.config.schema == "source":
+             features = datasets.Features(
+                 {
+                     "id": datasets.Value("int64"),
+                     "passage": datasets.Value("string"),
+                     "mentions": [
+                         {
+                             "id": datasets.Value("int64"),
+                             # Two entities which share a label are coreferences
+                             "labels": datasets.Sequence(datasets.Value("string")),
+                             "class": datasets.Value("string"),
+                             "text": datasets.Value("string"),
+                             "pronoun": datasets.Value("bool"),
+                             "proper": datasets.Value("bool"),
+                             "sent": datasets.Value("int32"),
+                             "cluster": datasets.Value("int32"),
+                             "per": datasets.Value("bool"),
+                             "org": datasets.Value("bool"),
+                             "loc": datasets.Value("bool"),
+                             "ner": datasets.Value("bool"),
+                             # "offset" is only available after modifying the original util class
+                             # "offset": datasets.Sequence(datasets.Value("int32"))
+                             # POS tags were originally available but removed due to polyglot icu dependency
+                             # polyglot.Text(passage, hint_language_code='id')
+                         }
+                     ],
+                 }
+             )
+         elif self.config.schema == "nusantara_kb":
+             features = schemas.kb_features
+
+         return datasets.DatasetInfo(
+             description=_DESCRIPTION,
+             features=features,
+             homepage=_HOMEPAGE,
+             license=_LICENSE,
+             citation=_CITATION,
+         )
+
+     class ReadPassage(TypedDict):
+         passage: str
+         annotated: str
+         mentions: List[Any]
+
+     def _split_generators(self, dl_manager: datasets.DownloadManager) -> List[datasets.SplitGenerator]:
+         """Returns SplitGenerators."""
+         urls = _URLS[_DATASETNAME]
+         base_path = Path(dl_manager.download_and_extract(urls)) / "indocoref-main" / "data"
+         passage_path = base_path / "passage"
+         annotated_path = base_path / "annotated"
+         mentions_per_file = TextPreprocess(annotated_path).run(0)
+
+         data: List[Indocoref.ReadPassage] = []
+         for passage_file_name, annotated_file_name in zip(sorted(os.listdir(passage_path)), sorted(os.listdir(annotated_path))):
+             passage_file_path, annotated_file_path = passage_path / passage_file_name, annotated_path / annotated_file_name
+
+             if os.path.isfile(passage_file_path) and os.path.isfile(annotated_file_path):
+                 with open(passage_file_path, "r", encoding="utf-8") as fpassage, open(annotated_file_path, "r", encoding="utf-8") as fannotated:
+                     data.append(self.ReadPassage(passage=fpassage.read(), annotated=fannotated.read(), mentions=mentions_per_file[annotated_file_name]))
+
+         # Dataset has no predefined splits, using datasets.Split.TRAIN for all of the data.
+         return [
+             datasets.SplitGenerator(
+                 name=datasets.Split.TRAIN,
+                 gen_kwargs={
+                     "data": data,
+                     "split": "train",
+                 },
+             ),
+         ]
+
+     class DisjointSet:
+         # Union-find over annotation labels: mentions that share a label end up in the same set.
+         def __init__(self, items):
+             # Must be an instance attribute; a class-level dict would be shared
+             # across documents and leak clusters between examples.
+             self.parent = {item: item for item in items}
+
+         def find(self, k):
+             if self.parent[k] == k:
+                 return k
+             return self.find(self.parent[k])
+
+         def union(self, a, b):
+             x = self.find(a)
+             y = self.find(b)
+             self.parent[x] = y
+
+     def _generate_examples(self, data: List[ReadPassage], split: str) -> Tuple[int, Dict]:
+         """Yields examples as (key, example) tuples."""
+         if self.config.schema == "source":
+             for index, example in enumerate(data):
+                 passage, mentions = example["passage"], example["mentions"]
+                 row = {
+                     "id": index,
+                     "passage": passage,
+                     "mentions": [
+                         {
+                             "id": mention["id"],
+                             "labels": mention["labels"],
+                             "class": mention["class"],
+                             "text": mention["text"],
+                             "pronoun": mention["pronoun"],
+                             "proper": mention["proper"],
+                             "sent": mention["sent"],
+                             "cluster": mention["cluster"],
+                             "per": mention["per"],
+                             "org": mention["org"],
+                             "loc": mention["loc"],
+                             "ner": mention["ner"],
+                         }
+                         for mention in mentions
+                     ],
+                 }
+                 yield index, row
+
+         elif self.config.schema == "nusantara_kb":
+             for index, example in enumerate(data):
+                 passage, mentions = example["passage"], example["mentions"]
+                 # Annotated text does not have any line breaks but the original passage does
+                 passage = passage.replace(" \n", " ")
+                 passage = passage.replace("\n", " ")
+                 all_labels = {label for mention in mentions for label in mention["labels"]}
+                 labels_disjoint_set = self.DisjointSet(all_labels)
+                 for mention in mentions:
+                     for i in range(1, len(mention["labels"])):
+                         labels_disjoint_set.union(mention["labels"][i], mention["labels"][i - 1])
+                 coreferences = {}
+                 for mention in mentions:
+                     coreference_id = labels_disjoint_set.find(mention["labels"][0])
+                     if coreference_id not in coreferences:
+                         coreferences[coreference_id] = []
+                     coreferences[coreference_id].append(str(mention["id"]))
+
+                 row_id = str(index)
+                 row = {
+                     "id": row_id,
+                     "passages": [{"id": "passage-" + row_id, "type": "text", "text": [passage], "offsets": [[0, len(passage)]]}],
+                     "entities": [
+                         {
+                             "id": row_id + "-entity-" + str(mention["id"]),
+                             "type": mention["class"],
+                             "text": [mention["text"]],
+                             "offsets": [list(mention["offset"])],
+                             "normalized": [],
+                         }
+                         for mention in mentions
+                     ],
+                     "coreferences": [{"id": row_id + "-coreference-" + str(coref_id), "entity_ids": [row_id + "-entity-" + entity_id for entity_id in entity_ids]} for coref_id, entity_ids in enumerate(coreferences.values())],
+                     "events": [],
+                     "relations": [],
+                 }
+                 yield index, row
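
For reference, a minimal sketch of how this loader is typically invoked. It assumes the script is saved locally as indocoref.py, that the nusacrowd package it imports is installed, and a datasets version that still supports loading from a local script:

    import datasets

    # The corpus ships without predefined splits, so everything lands in "train".
    indocoref = datasets.load_dataset("indocoref.py", name="indocoref_source", split="train")

    example = indocoref[0]
    print(example["passage"][:200])
    # Mentions that share a label belong to the same coreference chain.
    for mention in example["mentions"][:5]:
        print(mention["id"], mention["labels"], mention["text"])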
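
The nusantara_kb conversion groups mentions into chains by running union-find over their annotation labels: a mention carrying two labels links those labels into one cluster. A standalone toy run of the same logic, on hypothetical labels rather than real annotations:

    # Union-find over mention labels, mirroring _generate_examples above.
    mentions = [
        {"id": 0, "labels": ["joko"]},
        {"id": 1, "labels": ["joko", "presiden"]},  # two labels on one mention merge the chains
        {"id": 2, "labels": ["presiden"]},
        {"id": 3, "labels": ["jakarta"]},
    ]

    parent = {label: label for m in mentions for label in m["labels"]}

    def find(k):
        while parent[k] != k:
            k = parent[k]
        return k

    for m in mentions:
        for a, b in zip(m["labels"], m["labels"][1:]):
            parent[find(a)] = find(b)

    clusters = {}
    for m in mentions:
        clusters.setdefault(find(m["labels"][0]), []).append(m["id"])
    print(list(clusters.values()))  # [[0, 1, 2], [3]]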