holylovenia committed
Commit 31568bd
1 Parent(s): cd0f166

Upload id_coreference_resolution.py with huggingface_hub

Files changed (1)
  1. id_coreference_resolution.py +208 -0
id_coreference_resolution.py ADDED
@@ -0,0 +1,208 @@
+ import xml.etree.ElementTree as ET
+ from pathlib import Path
+ from typing import Dict, List, Tuple
+
+ import datasets
+
+ from nusacrowd.utils import schemas
+ from nusacrowd.utils.configs import NusantaraConfig
+ from nusacrowd.utils.constants import Tasks
+
+ _CITATION = """\
+ @INPROCEEDINGS{8074648,
+   author={Suherik, Gilang Julian and Purwarianti, Ayu},
+   booktitle={2017 5th International Conference on Information and Communication Technology (ICoIC7)},
+   title={Experiments on coreference resolution for Indonesian language with lexical and shallow syntactic features},
+   year={2017},
+   volume={},
+   number={},
+   pages={1-5},
+   doi={10.1109/ICoICT.2017.8074648}}
+ """
+
+ _LANGUAGES = ["ind"]  # We follow the ISO 639-3 language codes (https://iso639-3.sil.org/code_tables/639/data)
+ _LOCAL = False
+
+ _DATASETNAME = "id_coreference_resolution"
+
+ _DESCRIPTION = """\
+ We built an Indonesian coreference resolution system that resolves not only pronouns referring to proper nouns, but also proper nouns to proper nouns and pronouns to pronouns.
+ It differs from previously available Indonesian coreference resolution work in both problem scope and features.
+ We experimented with various lexical and shallow syntactic features, such as an appositive feature, a nearest-candidate feature, a direct-sentence feature, previous- and next-word features, and a lexical first-person feature.
+ We also modified the training set construction, selecting negative examples by cross-pairing every markable that appears between an antecedent and its anaphor.
+ We compared this against the two existing training set construction methods in experiments with the C4.5 algorithm.
+ On 200 news sentences, the best experiment achieved a 71.6% F-measure.
+ """
+
+ _HOMEPAGE = "https://github.com/tugas-akhir-nlp/indonesian-coreference-resolution-cnn/tree/master/data"
+
+ _LICENSE = "Creative Commons Attribution-ShareAlike 4.0"
+
+ _URLS = {
+     _DATASETNAME: {
+         "train": "https://raw.githubusercontent.com/tugas-akhir-nlp/indonesian-coreference-resolution-cnn/master/data/training/data.xml",
+         "test": "https://raw.githubusercontent.com/tugas-akhir-nlp/indonesian-coreference-resolution-cnn/master/data/testing/data.xml",
+     }
+ }
+
+ _SUPPORTED_TASKS = [Tasks.COREFERENCE_RESOLUTION]
+
+ _SOURCE_VERSION = "1.0.0"
+
+ _NUSANTARA_VERSION = "1.0.0"
+
+ class IDCoreferenceResolution(datasets.GeneratorBasedBuilder):
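+     """Dataset loader for Indonesian coreference resolution (Suherik & Purwarianti, 2017).
+
+     Two configs are provided: `source`, which mirrors the raw XML annotations, and
+     `nusantara_kb`, which maps each phrase into the shared NusaCrowd KB schema.
+     """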
+
+     SOURCE_VERSION = datasets.Version(_SOURCE_VERSION)
+     NUSANTARA_VERSION = datasets.Version(_NUSANTARA_VERSION)
+
+     BUILDER_CONFIGS = [
+         NusantaraConfig(
+             name="id_coreference_resolution_source",
+             version=SOURCE_VERSION,
+             description="ID Coreference Resolution source schema",
+             schema="source",
+             subset_id="id_coreference_resolution",
+         ),
+         NusantaraConfig(
+             name="id_coreference_resolution_nusantara_kb",
+             version=NUSANTARA_VERSION,
+             description="ID Coreference Resolution Nusantara schema",
+             schema="nusantara_kb",
+             subset_id="id_coreference_resolution",
+         ),
+     ]
+
+     DEFAULT_CONFIG_NAME = "id_coreference_resolution_source"
+
+     def _info(self) -> datasets.DatasetInfo:
+
+         if self.config.schema == "source":
+             features = datasets.Features(
+                 {
+                     "id": datasets.Value("string"),
+                     "phrases": [
+                         {
+                             "id": datasets.Value("string"),
+                             "type": datasets.Value("string"),
+                             "text": [
+                                 {
+                                     "word": datasets.Value("string"),
+                                     "ne": datasets.Value("string"),
+                                     "label": datasets.Value("string"),
+                                 }
+                             ],
+                         }
+                     ],
+                 }
+             )
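+
+         # The kb config reuses the feature set shared by all NusaCrowd KB datasets
+         # (passages, entities, coreferences, events, relations).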
+         elif self.config.schema == "nusantara_kb":
+             features = schemas.kb_features
+
+         return datasets.DatasetInfo(
+             description=_DESCRIPTION,
+             features=features,
+             homepage=_HOMEPAGE,
+             license=_LICENSE,
+             citation=_CITATION,
+         )
+
+     def _split_generators(self, dl_manager: datasets.DownloadManager) -> List[datasets.SplitGenerator]:
+         urls = _URLS[_DATASETNAME]
+
+         data_dir = dl_manager.download_and_extract(urls)
+
+         return [
+             datasets.SplitGenerator(
+                 name=datasets.Split.TRAIN,
+                 gen_kwargs={
+                     "filepath": data_dir["train"],
+                     "split": "train",
+                 },
+             ),
+             datasets.SplitGenerator(
+                 name=datasets.Split.TEST,
+                 gen_kwargs={
+                     "filepath": data_dir["test"],
+                     "split": "test",
+                 },
+             ),
+         ]
+
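+     # Each <phrase> element contains space-separated tokens; a token may carry a
+     # coreference label after a backslash ("word\label"), and the optional "ne"
+     # attribute holds pipe-separated named-entity tags aligned with the tokens.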
+     def _parse_phrase(self, phrase):
+         splitted_text = phrase.text.split(" ")
+         splitted_ne = []
+         if "ne" in phrase.attrib:
+             splitted_ne = phrase.attrib["ne"].split("|")
+         words = []
+         for i in range(len(splitted_text)):
+             word = splitted_text[i].split("\\")
+             ne = ""
+             label = ""
+             if i < len(splitted_ne):
+                 ne = splitted_ne[i]
+             if len(word) > 1:
+                 label = word[1]
+             words.append({
+                 "word": word[0],
+                 "ne": ne,
+                 "label": label,
+             })
+
+         phrase_id = phrase.attrib.get("id", "")
+
+         return {
+             "id": phrase_id,
+             "type": phrase.attrib["type"],
+             "text": words,
+         }
+
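+     # One example per <sentence> element. Under the kb schema, every phrase becomes
+     # an entity and all phrases of a sentence are linked in a single coreference chain.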
+     def _generate_examples(self, filepath: Path, split: str) -> Tuple[int, Dict]:
+         data = ET.parse(filepath).getroot()
+
+         for each_sentence in data:
+             sentence = {
+                 "id": each_sentence.attrib["id"],
+                 "phrases": [],
+             }
+             for phrase in each_sentence:
+                 parsed_phrase = self._parse_phrase(phrase)
+                 sentence["phrases"].append(parsed_phrase)
+
+             if self.config.schema == "source":
+                 yield int(each_sentence.attrib["id"]), sentence
+
+             elif self.config.schema == "nusantara_kb":
+                 ex = {
+                     "id": each_sentence.attrib["id"],
+                     "passages": [],
+                     "entities": [
+                         {
+                             "id": phrase["id"],
+                             "type": phrase["type"],
+                             "text": [text["word"] for text in phrase["text"]],
+                             "offsets": [[0, len(text["word"])] for text in phrase["text"]],
+                             "normalized": [
+                                 {
+                                     "db_name": text["ne"],
+                                     "db_id": "",
+                                 }
+                                 for text in phrase["text"]
+                             ],
+                         }
+                         for phrase in sentence["phrases"]
+                     ],
+                     "coreferences": [
+                         {
+                             "id": each_sentence.attrib["id"],
+                             "entity_ids": [phrase["id"] for phrase in sentence["phrases"]],
+                         }
+                     ],
+                     "events": [],
+                     "relations": [],
+                 }
+                 yield int(each_sentence.attrib["id"]), ex
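
A minimal usage sketch, assuming this script is saved locally as id_coreference_resolution.py and the nusacrowd package is importable; both config names come from BUILDER_CONFIGS above:

import datasets

# Load the raw XML view of the corpus via the local script path.
ds = datasets.load_dataset(
    "id_coreference_resolution.py",
    name="id_coreference_resolution_source",
)

# Each train example is one sentence with its annotated phrases.
print(ds["train"][0]["id"], ds["train"][0]["phrases"][0])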